ia64/xen-unstable
changeset 6539:99914b54f7bf
Merge.
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32	Thu Aug 18 10:40:02 2005 -0800
@@ -541,7 +541,7 @@ CONFIG_IP_NF_MATCH_IPRANGE=m
 # CONFIG_IP_NF_MATCH_STATE is not set
 # CONFIG_IP_NF_MATCH_CONNTRACK is not set
 # CONFIG_IP_NF_MATCH_OWNER is not set
-# CONFIG_IP_NF_MATCH_PHYSDEV is not set
+CONFIG_IP_NF_MATCH_PHYSDEV=y
 # CONFIG_IP_NF_MATCH_ADDRTYPE is not set
 # CONFIG_IP_NF_MATCH_REALM is not set
 # CONFIG_IP_NF_MATCH_SCTP is not set
@@ -689,7 +689,7 @@ CONFIG_E1000=y
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
 # CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
+CONFIG_SK98LIN=y
 # CONFIG_VIA_VELOCITY is not set
 CONFIG_TIGON3=y
 # CONFIG_BNX2 is not set
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64	Thu Aug 18 10:40:02 2005 -0800
@@ -480,7 +480,7 @@ CONFIG_IP_NF_MATCH_IPRANGE=m
 # CONFIG_IP_NF_MATCH_STATE is not set
 # CONFIG_IP_NF_MATCH_CONNTRACK is not set
 # CONFIG_IP_NF_MATCH_OWNER is not set
-# CONFIG_IP_NF_MATCH_PHYSDEV is not set
+CONFIG_IP_NF_MATCH_PHYSDEV=y
 # CONFIG_IP_NF_MATCH_ADDRTYPE is not set
 # CONFIG_IP_NF_MATCH_REALM is not set
 # CONFIG_IP_NF_MATCH_SCTP is not set
@@ -611,7 +611,7 @@ CONFIG_E1000=y
 # CONFIG_HAMACHI is not set
 # CONFIG_YELLOWFIN is not set
 # CONFIG_R8169 is not set
-# CONFIG_SK98LIN is not set
+CONFIG_SK98LIN=y
 # CONFIG_VIA_VELOCITY is not set
 CONFIG_TIGON3=y
 # CONFIG_BNX2 is not set
--- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64	Thu Aug 18 10:40:02 2005 -0800
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.12.4-xenU
-# Mon Aug 15 19:25:22 2005
+# Linux kernel version: 2.6.12-xenU
+# Thu Aug 18 11:15:14 2005
 #
 CONFIG_XEN=y
 CONFIG_ARCH_XEN=y
@@ -270,7 +270,10 @@ CONFIG_IP_ROUTE_FWMARK=y
 CONFIG_IP_ROUTE_MULTIPATH=y
 # CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set
 CONFIG_IP_ROUTE_VERBOSE=y
-# CONFIG_IP_PNP is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
 CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPGRE_BROADCAST=y
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/swiotlb.c	Thu Aug 18 10:40:02 2005 -0800
@@ -49,13 +49,14 @@ int swiotlb_force;
  * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
-static char *io_tlb_start, *io_tlb_end;
+static char *iotlb_virt_start, *iotlb_virt_end;
+static dma_addr_t iotlb_bus_start, iotlb_bus_end;
 
 /*
- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
- * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
+ * The number of IO TLB blocks (in groups of 64) betweeen iotlb_virt_start and
+ * iotlb_virt_end. This is command line adjustable via setup_io_tlb_npages.
  */
-static unsigned long io_tlb_nslabs;
+static unsigned long iotlb_nslabs;
 
 /*
  * When the IOMMU overflows we return a fallback buffer. This sets the size.
@@ -88,11 +89,14 @@ static DEFINE_SPINLOCK(io_tlb_lock);
 static int __init
 setup_io_tlb_npages(char *str)
 {
+        /* Unlike ia64, the size is aperture in megabytes, not 'slabs'! */
         if (isdigit(*str)) {
-                io_tlb_nslabs = simple_strtoul(str, &str, 0) <<
-                        (PAGE_SHIFT - IO_TLB_SHIFT);
-                /* avoid tail segment of size < IO_TLB_SEGSIZE */
-                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+                iotlb_nslabs = simple_strtoul(str, &str, 0) <<
+                        (20 - IO_TLB_SHIFT);
+                iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
+                /* Round up to power of two (xen_create_contiguous_region). */
+                while (iotlb_nslabs & (iotlb_nslabs-1))
+                        iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
         }
         if (*str == ',')
                 ++str;
@@ -114,45 +118,55 @@ setup_io_tlb_npages(char *str)
 void
 swiotlb_init_with_default_size (size_t default_size)
 {
-        unsigned long i;
+        unsigned long i, bytes;
 
-        if (!io_tlb_nslabs) {
-                io_tlb_nslabs = (default_size >> PAGE_SHIFT);
-                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+        if (!iotlb_nslabs) {
+                iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
+                iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
+                /* Round up to power of two (xen_create_contiguous_region). */
+                while (iotlb_nslabs & (iotlb_nslabs-1))
+                        iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
         }
 
+        bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
+
         /*
         * Get IO TLB memory from the low pages
         */
-        io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-                                               (1 << IO_TLB_SHIFT));
-        if (!io_tlb_start)
+        iotlb_virt_start = alloc_bootmem_low_pages(bytes);
+        if (!iotlb_virt_start)
                 panic("Cannot allocate SWIOTLB buffer");
 
         xen_create_contiguous_region(
-                (unsigned long)io_tlb_start,
-                get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)));
+                (unsigned long)iotlb_virt_start, get_order(bytes));
 
-        io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+        iotlb_virt_end = iotlb_virt_start + bytes;
 
         /*
         * Allocate and initialize the free list array. This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between io_tlb_start and io_tlb_end.
+        * between iotlb_virt_start and iotlb_virt_end.
         */
-        io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
-        for (i = 0; i < io_tlb_nslabs; i++)
+        io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
+        for (i = 0; i < iotlb_nslabs; i++)
                 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
         io_tlb_index = 0;
         io_tlb_orig_addr = alloc_bootmem(
-                io_tlb_nslabs * sizeof(*io_tlb_orig_addr));
+                iotlb_nslabs * sizeof(*io_tlb_orig_addr));
 
         /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
-        printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end-1));
+        iotlb_bus_start = virt_to_bus(iotlb_virt_start);
+        iotlb_bus_end   = iotlb_bus_start + bytes;
+        printk(KERN_INFO "Software IO TLB enabled: \n"
+               " Aperture:     %lu megabytes\n"
+               " Bus range:    0x%016lx - 0x%016lx\n"
+               " Kernel range: 0x%016lx - 0x%016lx\n",
+               bytes >> 20,
+               (unsigned long)iotlb_bus_start, (unsigned long)iotlb_bus_end,
+               (unsigned long)iotlb_virt_start, (unsigned long)iotlb_virt_end);
 }
 
 void
@@ -240,7 +254,7 @@ map_single(struct device *hwdev, struct
         {
                 wrap = index = ALIGN(io_tlb_index, stride);
 
-                if (index >= io_tlb_nslabs)
+                if (index >= iotlb_nslabs)
                         wrap = index = 0;
 
                 do {
@@ -260,7 +274,7 @@ map_single(struct device *hwdev, struct
                                 IO_TLB_SEGSIZE -1) && io_tlb_list[i];
                                 i--)
                                 io_tlb_list[i] = ++count;
-                        dma_addr = io_tlb_start +
+                        dma_addr = iotlb_virt_start +
                                 (index << IO_TLB_SHIFT);
 
                         /*
@@ -268,13 +282,13 @@ map_single(struct device *hwdev, struct
                         * the next round.
                         */
                         io_tlb_index =
-                                ((index + nslots) < io_tlb_nslabs
+                                ((index + nslots) < iotlb_nslabs
                                 ? (index + nslots) : 0);
 
                         goto found;
                 }
                 index += stride;
-                if (index >= io_tlb_nslabs)
+                if (index >= iotlb_nslabs)
                         index = 0;
         } while (index != wrap);
 
@@ -304,7 +318,7 @@ unmap_single(struct device *hwdev, char
 {
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+        int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
         struct phys_addr buffer = io_tlb_orig_addr[index];
 
         /*
@@ -345,7 +359,7 @@ unmap_single(struct device *hwdev, char
 static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
-        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+        int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
         struct phys_addr buffer = io_tlb_orig_addr[index];
         BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
         __sync_single(buffer, dma_addr, size, dir);
@@ -431,11 +445,9 @@ void
 swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
                      int dir)
 {
-        char *dma_addr = bus_to_virt(dev_addr);
-
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                unmap_single(hwdev, dma_addr, size, dir);
+        if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
+                unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
 }
 
 /*
@@ -452,22 +464,18 @@ void
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, int dir)
 {
-        char *dma_addr = bus_to_virt(dev_addr);
-
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                sync_single(hwdev, dma_addr, size, dir);
+        if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
+                sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
 }
 
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, int dir)
 {
-        char *dma_addr = bus_to_virt(dev_addr);
-
         BUG_ON(dir == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                sync_single(hwdev, dma_addr, size, dir);
+        if ((dev_addr >= iotlb_bus_start) && (dev_addr < iotlb_bus_end))
+                sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
 }
 
 /*
@@ -603,11 +611,9 @@ void
 swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
                    size_t size, enum dma_data_direction direction)
 {
-        char *dma_addr = bus_to_virt(dma_address);
-
         BUG_ON(direction == DMA_NONE);
-        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-                unmap_single(hwdev, dma_addr, size, direction);
+        if ((dma_address >= iotlb_bus_start) && (dma_address < iotlb_bus_end))
+                unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
 }
 
 int
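The rounding loop that appears twice in the hunk above rounds the slab count up to a power of two, since xen_create_contiguous_region() allocates whole orders: n & ~(n-1) isolates the lowest set bit, and repeatedly adding it carries upward until a single bit remains. A standalone sketch of the same arithmetic (the helper name is illustrative, not from the changeset):

#include <assert.h>

/*
 * Round n up to the next power of two. n & ~(n-1) isolates the lowest
 * set bit; adding it repeatedly carries upward until one bit is left,
 * at which point n & (n-1) == 0. Same arithmetic as the iotlb_nslabs
 * rounding in setup_io_tlb_npages()/swiotlb_init_with_default_size().
 */
static unsigned long round_up_pow2(unsigned long n)
{
        while (n & (n - 1))
                n += n & ~(n - 1);
        return n;
}

int main(void)
{
        assert(round_up_pow2(24) == 32);        /* 11000b -> 100000b */
        assert(round_up_pow2(64) == 64);        /* already a power of two */
        assert(round_up_pow2(100) == 128);
        return 0;
}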
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c	Thu Aug 18 10:40:02 2005 -0800
@@ -59,124 +59,124 @@
 #ifndef CONFIG_XEN_SHADOW_MODE
 void xen_l1_entry_update(pte_t *ptr, pte_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = pte_val_ma(val);
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+        mmu_update_t u;
+        u.ptr = virt_to_machine(ptr);
+        u.val = pte_val_ma(val);
+        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = pmd_val_ma(val);
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+        mmu_update_t u;
+        u.ptr = virt_to_machine(ptr);
+        u.val = pmd_val_ma(val);
+        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 #ifdef CONFIG_X86_PAE
 void xen_l3_entry_update(pud_t *ptr, pud_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = pud_val_ma(val);
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+        mmu_update_t u;
+        u.ptr = virt_to_machine(ptr);
+        u.val = pud_val_ma(val);
+        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 #endif
 
 #ifdef CONFIG_X86_64
 void xen_l3_entry_update(pud_t *ptr, pud_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = val.pud;
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+        mmu_update_t u;
+        u.ptr = virt_to_machine(ptr);
+        u.val = val.pud;
+        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = val.pgd;
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+        mmu_update_t u;
+        u.ptr = virt_to_machine(ptr);
+        u.val = val.pgd;
+        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_XEN_SHADOW_MODE */
 
 void xen_machphys_update(unsigned long mfn, unsigned long pfn)
 {
-    mmu_update_t u;
-    u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    u.val = pfn;
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+        mmu_update_t u;
+        u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+        u.val = pfn;
+        BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pt_switch(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_NEW_BASEPTR;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_NEW_BASEPTR;
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_new_user_pt(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_NEW_USER_BASEPTR;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_NEW_USER_BASEPTR;
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_tlb_flush(void)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_invlpg(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_INVLPG_LOCAL;
-    op.linear_addr = ptr & PAGE_MASK;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_INVLPG_LOCAL;
+        op.linear_addr = ptr & PAGE_MASK;
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 #ifdef CONFIG_SMP
 
 void xen_tlb_flush_all(void)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_TLB_FLUSH_ALL;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_TLB_FLUSH_ALL;
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_tlb_flush_mask(cpumask_t *mask)
 {
-    struct mmuext_op op;
-    if ( cpus_empty(*mask) )
-        return;
-    op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-    op.vcpumask = mask->bits;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        if ( cpus_empty(*mask) )
+                return;
+        op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+        op.vcpumask = mask->bits;
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_invlpg_all(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_INVLPG_ALL;
-    op.linear_addr = ptr & PAGE_MASK;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_INVLPG_ALL;
+        op.linear_addr = ptr & PAGE_MASK;
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
 {
-    struct mmuext_op op;
-    if ( cpus_empty(*mask) )
-        return;
-    op.cmd = MMUEXT_INVLPG_MULTI;
-    op.vcpumask = mask->bits;
-    op.linear_addr = ptr & PAGE_MASK;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        if ( cpus_empty(*mask) )
+                return;
+        op.cmd = MMUEXT_INVLPG_MULTI;
+        op.vcpumask = mask->bits;
+        op.linear_addr = ptr & PAGE_MASK;
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 #endif /* CONFIG_SMP */
@@ -184,221 +184,281 @@ void xen_invlpg_mask(cpumask_t *mask, un
 #ifndef CONFIG_XEN_SHADOW_MODE
 void xen_pgd_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
+        struct mmuext_op op;
 #ifdef CONFIG_X86_64
-    op.cmd = MMUEXT_PIN_L4_TABLE;
+        op.cmd = MMUEXT_PIN_L4_TABLE;
 #elif defined(CONFIG_X86_PAE)
-    op.cmd = MMUEXT_PIN_L3_TABLE;
+        op.cmd = MMUEXT_PIN_L3_TABLE;
 #else
-    op.cmd = MMUEXT_PIN_L2_TABLE;
+        op.cmd = MMUEXT_PIN_L2_TABLE;
 #endif
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pgd_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_UNPIN_TABLE;
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pte_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_PIN_L1_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_PIN_L1_TABLE;
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pte_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_UNPIN_TABLE;
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 #ifdef CONFIG_X86_64
 void xen_pud_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_PIN_L3_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_PIN_L3_TABLE;
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pud_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_UNPIN_TABLE;
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pmd_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_PIN_L2_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_PIN_L2_TABLE;
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pmd_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_UNPIN_TABLE;
+        op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_XEN_SHADOW_MODE */
 
 void xen_set_ldt(unsigned long ptr, unsigned long len)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_SET_LDT;
-    op.linear_addr = ptr;
-    op.nr_ents = len;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+        struct mmuext_op op;
+        op.cmd = MMUEXT_SET_LDT;
+        op.linear_addr = ptr;
+        op.nr_ents = len;
+        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+/*
+ * Bitmap is indexed by page number. If bit is set, the page is part of a
+ * xen_create_contiguous_region() area of memory.
+ */
+unsigned long *contiguous_bitmap;
+
+static void contiguous_bitmap_set(
+        unsigned long first_page, unsigned long nr_pages)
+{
+        unsigned long start_off, end_off, curr_idx, end_idx;
+
+        curr_idx  = first_page / BITS_PER_LONG;
+        start_off = first_page & (BITS_PER_LONG-1);
+        end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
+        end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
+
+        if (curr_idx == end_idx) {
+                contiguous_bitmap[curr_idx] |=
+                        ((1UL<<end_off)-1) & -(1UL<<start_off);
+        } else {
+                contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
+                while ( ++curr_idx < end_idx )
+                        contiguous_bitmap[curr_idx] = ~0UL;
+                contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
+        }
+}
+
+static void contiguous_bitmap_clear(
+        unsigned long first_page, unsigned long nr_pages)
+{
+        unsigned long start_off, end_off, curr_idx, end_idx;
+
+        curr_idx  = first_page / BITS_PER_LONG;
+        start_off = first_page & (BITS_PER_LONG-1);
+        end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
+        end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
+
+        if (curr_idx == end_idx) {
+                contiguous_bitmap[curr_idx] &=
+                        -(1UL<<end_off) | ((1UL<<start_off)-1);
+        } else {
+                contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
+                while ( ++curr_idx != end_idx )
+                        contiguous_bitmap[curr_idx] = 0;
+                contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
+        }
 }
 
 /* Ensure multi-page extents are contiguous in machine memory. */
 void xen_create_contiguous_region(unsigned long vstart, unsigned int order)
 {
-    pgd_t *pgd;
-    pud_t *pud;
-    pmd_t *pmd;
-    pte_t *pte;
-    unsigned long mfn, i, flags;
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
+        unsigned long mfn, i, flags;
 
-    scrub_pages(vstart, 1 << order);
+        scrub_pages(vstart, 1 << order);
 
-    balloon_lock(flags);
+        balloon_lock(flags);
 
-    /* 1. Zap current PTEs, giving away the underlying pages. */
-    for (i = 0; i < (1<<order); i++) {
-        pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-        mfn = pte_mfn(*pte);
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-            INVALID_P2M_ENTRY;
-        BUG_ON(HYPERVISOR_dom_mem_op(
-            MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
-    }
+        /* 1. Zap current PTEs, giving away the underlying pages. */
+        for (i = 0; i < (1<<order); i++) {
+                pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+                pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+                pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+                pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+                mfn = pte_mfn(*pte);
+                BUG_ON(HYPERVISOR_update_va_mapping(
+                        vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
+                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                        INVALID_P2M_ENTRY;
+                BUG_ON(HYPERVISOR_dom_mem_op(
+                        MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
        }
 
-    /* 2. Get a new contiguous memory extent. */
-    BUG_ON(HYPERVISOR_dom_mem_op(
-        MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
+        /* 2. Get a new contiguous memory extent. */
+        BUG_ON(HYPERVISOR_dom_mem_op(
+                MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
 
-    /* 3. Map the new extent in place of old pages. */
-    for (i = 0; i < (1<<order); i++) {
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE),
-            __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
-        xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
-    }
+        /* 3. Map the new extent in place of old pages. */
+        for (i = 0; i < (1<<order); i++) {
+                BUG_ON(HYPERVISOR_update_va_mapping(
+                        vstart + (i*PAGE_SIZE),
+                        pfn_pte_ma(mfn+i, PAGE_KERNEL), 0));
+                xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
        }
 
-    flush_tlb_all();
+        flush_tlb_all();
 
-    balloon_unlock(flags);
+        contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
+
+        balloon_unlock(flags);
 }
 
 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
 {
-    pgd_t *pgd;
-    pud_t *pud;
-    pmd_t *pmd;
-    pte_t *pte;
-    unsigned long mfn, i, flags;
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
+        unsigned long mfn, i, flags;
 
-    scrub_pages(vstart, 1 << order);
+        scrub_pages(vstart, 1 << order);
 
-    balloon_lock(flags);
+        balloon_lock(flags);
+
+        contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
 
-    /* 1. Zap current PTEs, giving away the underlying pages. */
-    for (i = 0; i < (1<<order); i++) {
-        pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-        mfn = pte_mfn(*pte);
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-            INVALID_P2M_ENTRY;
-        BUG_ON(HYPERVISOR_dom_mem_op(
-            MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
-    }
+        /* 1. Zap current PTEs, giving away the underlying pages. */
+        for (i = 0; i < (1<<order); i++) {
+                pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+                pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+                pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+                pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+                mfn = pte_mfn(*pte);
+                BUG_ON(HYPERVISOR_update_va_mapping(
+                        vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
+                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                        INVALID_P2M_ENTRY;
+                BUG_ON(HYPERVISOR_dom_mem_op(
+                        MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
        }
 
-    /* 2. Map new pages in place of old pages. */
-    for (i = 0; i < (1<<order); i++) {
-        BUG_ON(HYPERVISOR_dom_mem_op(
-            MEMOP_increase_reservation, &mfn, 1, 0) != 1);
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE),
-            __pte_ma((mfn<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
-        xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn;
-    }
+        /* 2. Map new pages in place of old pages. */
+        for (i = 0; i < (1<<order); i++) {
+                BUG_ON(HYPERVISOR_dom_mem_op(
+                        MEMOP_increase_reservation, &mfn, 1, 0) != 1);
+                BUG_ON(HYPERVISOR_update_va_mapping(
+                        vstart + (i*PAGE_SIZE),
+                        pfn_pte_ma(mfn, PAGE_KERNEL), 0));
+                xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
+                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn;
        }
 
-    flush_tlb_all();
+        flush_tlb_all();
 
-    balloon_unlock(flags);
+        balloon_unlock(flags);
 }
 
 
 unsigned long allocate_empty_lowmem_region(unsigned long pages)
 {
-    pgd_t *pgd;
-    pud_t *pud;
-    pmd_t *pmd;
-    pte_t *pte;
-    unsigned long *pfn_array;
-    unsigned long vstart;
-    unsigned long i;
-    unsigned int order = get_order(pages*PAGE_SIZE);
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
+        unsigned long *pfn_array;
+        unsigned long vstart;
+        unsigned long i;
+        unsigned int order = get_order(pages*PAGE_SIZE);
 
-    vstart = __get_free_pages(GFP_KERNEL, order);
-    if ( vstart == 0 )
-        return 0UL;
+        vstart = __get_free_pages(GFP_KERNEL, order);
+        if (vstart == 0)
+                return 0UL;
 
-    scrub_pages(vstart, 1 << order);
+        scrub_pages(vstart, 1 << order);
 
-    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
-    if ( pfn_array == NULL )
-        BUG();
+        pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+        BUG_ON(pfn_array == NULL);
 
-    for ( i = 0; i < (1<<order); i++ )
-    {
-        pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
-        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-        pfn_array[i] = pte_mfn(*pte);
+        for (i = 0; i < (1<<order); i++) {
+                pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+                pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+                pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+                pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+                pfn_array[i] = pte_mfn(*pte);
 #ifdef CONFIG_X86_64
-        xen_l1_entry_update(pte, __pte(0));
+                xen_l1_entry_update(pte, __pte(0));
 #else
-        BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
-                                            __pte_ma(0), 0));
+                BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
+                                                    __pte_ma(0), 0));
 #endif
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-            INVALID_P2M_ENTRY;
-    }
+                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                        INVALID_P2M_ENTRY;
        }
 
-    flush_tlb_all();
+        flush_tlb_all();
 
-    balloon_put_pages(pfn_array, 1 << order);
+        balloon_put_pages(pfn_array, 1 << order);
 
-    vfree(pfn_array);
+        vfree(pfn_array);
 
-    return vstart;
+        return vstart;
 }
 
 EXPORT_SYMBOL(allocate_empty_lowmem_region);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
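The new contiguous_bitmap_set()/contiguous_bitmap_clear() helpers mark page ranges one machine word at a time: -(1UL << start_off) keeps bits at and above start_off, (1UL << end_off) - 1 keeps bits below end_off, and their AND covers exactly [start_off, end_off) within a single word. A self-contained check of that mask algebra (the test harness is illustrative, not from the changeset):

#include <assert.h>

/*
 * Mask with ones in bit positions [start, end) of one word, for
 * 0 <= start < end < bits-per-long. This is the single-word case of
 * contiguous_bitmap_set() above; the multi-word case just uses the
 * two halves of this mask on the first and last words.
 */
static unsigned long range_mask(unsigned int start, unsigned int end)
{
        return ((1UL << end) - 1) & -(1UL << start);
}

int main(void)
{
        unsigned long w = 0;

        assert(range_mask(0, 4) == 0x0fUL);
        assert(range_mask(4, 8) == 0xf0UL);

        /* Set then clear the same range; the word must end up unchanged. */
        w |= range_mask(3, 9);
        assert(w == 0x1f8UL);
        w &= -(1UL << 9) | ((1UL << 3) - 1);    /* contiguous_bitmap_clear's mask */
        assert(w == 0);
        return 0;
}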
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c	Thu Aug 18 10:40:02 2005 -0800
@@ -41,6 +41,8 @@
 #include <asm/sections.h>
 #include <asm-xen/hypervisor.h>
 
+extern unsigned long *contiguous_bitmap;
+
 #if defined(CONFIG_SWIOTLB)
 extern void swiotlb_init(void);
 int swiotlb;
@@ -637,6 +639,11 @@ void __init mem_init(void)
        int bad_ppro;
        unsigned long pfn;
 
+       contiguous_bitmap = alloc_bootmem_low_pages(
+               (max_low_pfn + 2*BITS_PER_LONG) >> 3);
+       BUG_ON(!contiguous_bitmap);
+       memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
+
 #if defined(CONFIG_SWIOTLB)
        swiotlb_init();
 #endif
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Thu Aug 18 10:40:02 2005 -0800
@@ -300,17 +300,17 @@ void __init bt_iounmap(void *addr, unsig
 
 
 static int direct_remap_area_pte_fn(pte_t *pte, 
-                                    struct page *pte_page,
-                                    unsigned long address, 
-                                    void *data)
+                                   struct page *pte_page,
+                                   unsigned long address,
+                                   void *data)
 {
-    mmu_update_t **v = (mmu_update_t **)data;
+       mmu_update_t **v = (mmu_update_t **)data;
 
-    (*v)->ptr = (pfn_to_mfn(page_to_pfn(pte_page)) << PAGE_SHIFT)
-                | ((unsigned long)pte & ~PAGE_MASK);
-    (*v)++;
+       (*v)->ptr = ((physaddr_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
+                    PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
+       (*v)++;
 
-    return 0;
+       return 0;
 }
 
 int direct_remap_area_pages(struct mm_struct *mm,
@@ -397,6 +397,16 @@ int touch_pte_range(struct mm_struct *mm
        }
 
        return generic_page_range(mm, address, size, f, NULL);
-} 
+}
 
 EXPORT_SYMBOL(touch_pte_range);
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c	Thu Aug 18 10:40:02 2005 -0800
@@ -426,16 +426,10 @@ static __init void parse_cmdline_early (
 #ifdef CONFIG_XEN
 static void __init contig_initmem_init(void)
 {
-        unsigned long bootmap_size, bootmap;
-
-        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
-        bootmap = start_pfn;
-        bootmap_size = init_bootmem(bootmap, end_pfn);
-        reserve_bootmem(bootmap, bootmap_size);
-
-        free_bootmem(start_pfn << PAGE_SHIFT, (end_pfn - start_pfn) << PAGE_SHIFT);
-        reserve_bootmem(0, (PFN_PHYS(start_pfn) +
-                            bootmap_size + PAGE_SIZE-1));
+       unsigned long bootmap_size = init_bootmem(start_pfn, end_pfn);
+       free_bootmem(0, end_pfn << PAGE_SHIFT);
+       /* XXX KAF: Why can't we leave low 1MB of memory free? */
+       reserve_bootmem(0, (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE-1));
 }
 #else
 static void __init contig_initmem_init(void)
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c	Thu Aug 18 10:40:02 2005 -0800
@@ -40,6 +40,12 @@
 #include <asm/proto.h>
 #include <asm/smp.h>
 
+extern unsigned long *contiguous_bitmap;
+
+#if defined(CONFIG_SWIOTLB)
+extern void swiotlb_init(void);
+#endif
+
 #ifndef Dprintk
 #define Dprintk(x...)
 #endif
@@ -794,8 +800,12 @@ void __init mem_init(void)
        int codesize, reservedpages, datasize, initsize;
        int tmp;
 
+       contiguous_bitmap = alloc_bootmem_low_pages(
+               (end_pfn + 2*BITS_PER_LONG) >> 3);
+       BUG_ON(!contiguous_bitmap);
+       memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
+
 #if defined(CONFIG_SWIOTLB)
-       extern void swiotlb_init(void);
        swiotlb_init();
 #endif
 
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c	Thu Aug 18 10:40:02 2005 -0800
@@ -213,9 +213,7 @@ static void balloon_process(void *unused
         {
             BUG_ON(HYPERVISOR_update_va_mapping(
                 (unsigned long)__va(pfn << PAGE_SHIFT),
-                __pte_ma((mfn_list[i] << PAGE_SHIFT) |
-                         pgprot_val(PAGE_KERNEL)),
-                0));
+                pfn_pte_ma(mfn_list[i], PAGE_KERNEL), 0));
         }
 
         /* Finally, relinquish the memory back to the system allocator. */
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c	Thu Aug 18 10:40:02 2005 -0800
@@ -406,21 +406,15 @@ static void dispatch_probe(blkif_t *blki
 #endif
 
 
-#ifdef CONFIG_XEN_BLKDEV_TAP_BE
     if ( HYPERVISOR_update_va_mapping_otherdomain(
         MMAP_VADDR(pending_idx, 0),
-        (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
+        pfn_pte_ma(req->frame_and_sects[0] >> PAGE_SHIFT, PAGE_KERNEL),
+#ifdef CONFIG_XEN_BLKDEV_TAP_BE
         0, (blkif->is_blktap ? ID_TO_DOM(req->id) : blkif->domid) ) )
-
-        goto out;
 #else
-    if ( HYPERVISOR_update_va_mapping_otherdomain(
-        MMAP_VADDR(pending_idx, 0),
-        (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
-        0, blkif->domid) )
-
+        0, blkif->domid) )
+#endif
     goto out;
-#endif
 #endif /* endif CONFIG_XEN_BLKDEV_GRANT */
 
     rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0),
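Both this hunk and the balloon hunk above swap hand-assembled PTEs of the form (pte_t){ (addr & PAGE_MASK) | __PAGE_KERNEL } for pfn_pte_ma(). One plausible motivation beyond brevity: with a 64-bit PTE (PAE), building the entry from a frame number inside the PTE's own width avoids truncating machine addresses above 4GB. A sketch of that failure mode (all names and values here are illustrative):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/*
 * pfn_pte_ma-style construction: shift the machine frame number
 * within the PTE's own 64-bit width, then OR in the flag bits.
 */
static uint64_t pte_from_mfn(uint64_t mfn, uint64_t prot)
{
        return (mfn << PAGE_SHIFT) | prot;
}

int main(void)
{
        uint64_t mfn = 0x123456UL;      /* frame whose address exceeds 4GB */
        uint32_t addr32 = (uint32_t)(mfn << PAGE_SHIFT);   /* truncates */

        assert(pte_from_mfn(mfn, 0x63) == 0x123456063ULL);
        assert((uint64_t)addr32 != (mfn << PAGE_SHIFT));   /* high bits lost */
        return 0;
}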
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h	Wed Aug 17 12:34:38 2005 -0800
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h	Thu Aug 18 10:40:02 2005 -0800
@@ -26,7 +26,9 @@ address_needs_mapping(struct device *hwd
 static inline int
 range_straddles_page_boundary(void *p, size_t size)
 {
-       return ((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE);
+       extern unsigned long *contiguous_bitmap;
+       return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+               !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
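With the bitmap in place, range_straddles_page_boundary() can answer "no bounce needed" for buffers whose pages Xen has already made machine-contiguous, even when they cross a page boundary. A standalone sketch of the predicate (page size, bitmap and test_bit are stubbed here; the real code uses __pa(p) and the kernel's test_bit):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define BITS_PER_LONG   (8 * sizeof(unsigned long))

/* One bit per page; set => page lies in a machine-contiguous extent. */
static unsigned long contiguous_bitmap[64];

static bool test_bit_simple(unsigned long nr, const unsigned long *map)
{
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

/*
 * Bounce only if the buffer crosses a page boundary AND its first page
 * is not part of a known machine-contiguous region; 'pa' plays the
 * role of __pa(p) in the hunk above.
 */
static bool needs_bounce(unsigned long pa, size_t size)
{
        return (((pa & ~PAGE_MASK) + size) > PAGE_SIZE) &&
               !test_bit_simple(pa >> PAGE_SHIFT, contiguous_bitmap);
}

int main(void)
{
        assert(!needs_bounce(0x1000, 256));     /* fits in one page */
        assert(needs_bounce(0x1f00, 512));      /* straddles, not contiguous */
        contiguous_bitmap[0] |= 1UL << 1;       /* mark page 1 contiguous */
        assert(!needs_bounce(0x1f00, 512));     /* straddles, but contiguous */
        return 0;
}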
--- a/tools/examples/network-bridge	Wed Aug 17 12:34:38 2005 -0800
+++ b/tools/examples/network-bridge	Thu Aug 18 10:40:02 2005 -0800
@@ -51,7 +51,7 @@ for arg ; do export "${arg}" ; done
 
 bridge=${bridge:-xen-br0}
 netdev=${netdev:-eth0}
-antispoof=${antispoof:-yes}
+antispoof=${antispoof:-no}
 
 echo "*network $OP bridge=$bridge netdev=$netdev antispoof=$antispoof" >&2
 
--- a/tools/python/xen/xm/create.py	Wed Aug 17 12:34:38 2005 -0800
+++ b/tools/python/xen/xm/create.py	Thu Aug 18 10:40:02 2005 -0800
@@ -23,6 +23,7 @@ import string
 import sys
 import socket
 import commands
+import time
 
 import xen.lowlevel.xc
 
@@ -674,18 +675,33 @@ def get_dom0_alloc():
     return 0
 
 def balloon_out(dom0_min_mem, opts):
-    """Balloon out to get memory for domU, if necessarily"""
+    """Balloon out memory from dom0 if necessary"""
     SLACK = 4
+    timeout = 20 # 2s
+    ret = 0
 
     xc = xen.lowlevel.xc.new()
     pinfo = xc.physinfo()
-    free_mem = pinfo['free_pages']/256
-    if free_mem < opts.vals.memory + SLACK:
-        need_mem = opts.vals.memory + SLACK - free_mem
-        cur_alloc = get_dom0_alloc()
-        if cur_alloc - need_mem >= dom0_min_mem:
-            server.xend_domain_mem_target_set(0, cur_alloc - need_mem)
+    free_mem = pinfo['free_pages'] / 256
+    domU_need_mem = opts.vals.memory + SLACK
+
+    dom0_cur_alloc = get_dom0_alloc()
+    dom0_new_alloc = dom0_cur_alloc - (domU_need_mem - free_mem)
+
+    if free_mem < domU_need_mem and dom0_new_alloc >= dom0_min_mem:
+
+        server.xend_domain_mem_target_set(0, dom0_new_alloc)
+
+        while dom0_cur_alloc > dom0_new_alloc and timeout > 0:
+            time.sleep(0.1) # sleep 100ms
+            dom0_cur_alloc = get_dom0_alloc()
+            timeout -= 1
+
+        if dom0_cur_alloc > dom0_new_alloc:
+            ret = 1
+
     del xc
+    return ret
 
 def main(argv):
     random.seed()
@@ -717,7 +733,8 @@ def main(argv):
     else:
         dom0_min_mem = xroot.get_dom0_min_mem()
         if dom0_min_mem != 0:
-            balloon_out(dom0_min_mem, opts)
+            if balloon_out(dom0_min_mem, opts):
+                return
 
     dom = make_domain(opts, config)
     if opts.vals.console_autoconnect:
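balloon_out() no longer fires and forgets: it computes dom0's new allocation, sets the target, then polls get_dom0_alloc() every 100ms for up to 20 ticks (2 seconds), returning nonzero on failure so that main() can abort domain creation. The same set-then-poll shape in C (the stubs model xend's interface; they are not part of the changeset):

#include <unistd.h>

/* Toy stand-ins for xend_domain_mem_target_set() / get_dom0_alloc(). */
static unsigned long cur_alloc = 256, target = 256;
static void set_target(unsigned long mb) { target = mb; }
static unsigned long get_alloc(void)
{
        if (cur_alloc > target)
                cur_alloc -= 8;         /* model dom0 releasing memory */
        return cur_alloc;
}

/*
 * Set the new dom0 target, then poll every 100ms for up to 2 seconds;
 * return nonzero if dom0 failed to shrink in time, mirroring
 * balloon_out()'s new return convention.
 */
static int shrink_dom0(unsigned long new_alloc)
{
        int timeout = 20;               /* 20 * 100ms = 2s */

        set_target(new_alloc);
        while (get_alloc() > new_alloc && timeout > 0) {
                usleep(100 * 1000);     /* 100ms */
                timeout--;
        }
        return get_alloc() > new_alloc;
}

int main(void)
{
        return shrink_dom0(192);        /* 0 on success */
}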
--- a/tools/python/xen/xm/main.py	Wed Aug 17 12:34:38 2005 -0800
+++ b/tools/python/xen/xm/main.py	Thu Aug 18 10:40:02 2005 -0800
@@ -200,7 +200,11 @@ def xm_migrate(args):
 def xm_list(args):
     use_long = 0
     show_vcpus = 0
-    (options, params) = getopt(args, 'lv', ['long','vcpus'])
+    try:
+        (options, params) = getopt(args, 'lv', ['long','vcpus'])
+    except GetoptError, opterr:
+        err(opterr)
+        sys.exit(1)
 
     n = len(params)
     for (k, v) in options:
--- a/xen/arch/x86/mm.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/xen/arch/x86/mm.c	Thu Aug 18 10:40:02 2005 -0800
@@ -3059,7 +3059,7 @@ static int ptwr_emulated_update(
     }
 
     /* Turn a sub-word access into a full-word access. */
-    if (bytes != sizeof(physaddr_t))
+    if ( bytes != sizeof(physaddr_t) )
     {
         int rc;
         physaddr_t full;
@@ -3076,6 +3076,10 @@ static int ptwr_emulated_update(
         val &= (((physaddr_t)1 << (bytes*8)) - 1);
         val <<= (offset)*8;
         val |= full;
+        /* Also fill in missing parts of the cmpxchg old value. */
+        old &= (((physaddr_t)1 << (bytes*8)) - 1);
+        old <<= (offset)*8;
+        old |= full;
     }
 
     /* Read the PTE that maps the page being updated. */
@@ -3111,7 +3115,7 @@ static int ptwr_emulated_update(
     if ( do_cmpxchg )
     {
         ol1e = l1e_from_intpte(old);
-        if ( cmpxchg((unsigned long *)pl1e, old, val) != old )
+        if ( cmpxchg((intpte_t *)pl1e, old, val) != old )
         {
             unmap_domain_page(pl1e);
             put_page_from_l1e(nl1e, d);
@@ -3299,8 +3303,8 @@ int ptwr_do_page_fault(struct domain *d,
 
     /* Finally, make the p.t. page writable by the guest OS. */
     l1e_add_flags(pte, _PAGE_RW);
-    if ( unlikely(__copy_to_user(&linear_pg_table[l1_linear_offset(addr)],
-                                 &pte, sizeof(pte))) )
+    if ( unlikely(__put_user(pte.l1,
+                             &linear_pg_table[l1_linear_offset(addr)].l1)) )
     {
         MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
                 &linear_pg_table[l1_linear_offset(addr)]);
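The first two hunks fix ptwr_emulated_update() for sub-word writes: when a guest writes fewer than sizeof(physaddr_t) bytes into a PTE, both the new value and the cmpxchg comparison value must be widened with the unwritten bytes of the current entry, or the compare runs against a truncated old value. A minimal model of that widening (helper name and values are illustrative):

#include <assert.h>
#include <stdint.h>

typedef uint64_t physaddr_t;    /* 8-byte PTE, as with PAE */

/*
 * Widen a sub-word write of 'bytes' bytes at byte 'offset' into a
 * full-word value by merging the untouched bytes from 'cur' (the
 * current PTE). The hunk above applies this to both 'val' and 'old'.
 */
static physaddr_t widen(physaddr_t v, physaddr_t cur,
                        unsigned int offset, unsigned int bytes)
{
        physaddr_t mask = (((physaddr_t)1 << (bytes * 8)) - 1) << (offset * 8);
        v &= ((physaddr_t)1 << (bytes * 8)) - 1;
        v <<= offset * 8;
        return v | (cur & ~mask);
}

int main(void)
{
        physaddr_t pte = 0x1122334455667788ULL;
        /* Guest writes 4 bytes (0xAABBCCDD) at offset 0. */
        physaddr_t nv = widen(0xAABBCCDDULL, pte, 0, 4);
        physaddr_t ov = widen(0x55667788ULL, pte, 0, 4);

        assert(ov == pte);      /* comparison value now matches the PTE */
        assert(nv == 0x11223344AABBCCDDULL);
        return 0;
}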
--- a/xen/arch/x86/setup.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/xen/arch/x86/setup.c	Thu Aug 18 10:40:02 2005 -0800
@@ -244,6 +244,8 @@ static void __init start_of_day(void)
 
 #define EARLY_FAIL() for ( ; ; ) __asm__ __volatile__ ( "hlt" )
 
+static struct e820entry e820_raw[E820MAX];
+
 void __init __start_xen(multiboot_info_t *mbi)
 {
     char *cmdline;
@@ -253,7 +255,6 @@ void __init __start_xen(multiboot_info_t
     unsigned long _initrd_start = 0, _initrd_len = 0;
     unsigned int initrdidx = 1;
     physaddr_t s, e;
-    struct e820entry e820_raw[E820MAX];
     int i, e820_raw_nr = 0, bytes = 0;
     struct ns16550_defaults ns16550 = {
         .data_bits = 8,
--- a/xen/arch/x86/traps.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/xen/arch/x86/traps.c	Thu Aug 18 10:40:02 2005 -0800
@@ -159,10 +159,8 @@ void show_trace(unsigned long *esp)
         addr = *stack++;
         if ( is_kernel_text(addr) )
         {
-            if ( (i != 0) && ((i % 6) == 0) )
-                printk("\n ");
             printk("[<%p>]", _p(addr));
-            print_symbol(" %s\n", addr);
+            print_symbol(" %s\n ", addr);
             i++;
         }
     }
--- a/xen/arch/x86/x86_32/traps.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/xen/arch/x86/x86_32/traps.c	Thu Aug 18 10:40:02 2005 -0800
@@ -66,8 +66,9 @@ void show_registers(struct cpu_user_regs
 
     printk("CPU: %d\nEIP: %04lx:[<%08lx>]",
            smp_processor_id(), (unsigned long)0xffff & regs->cs, eip);
-    print_symbol(" %s\n", eip);
-    printk("EFLAGS: %08lx CONTEXT: %s\n", eflags, context);
+    if ( !GUEST_MODE(regs) )
+        print_symbol(" %s", eip);
+    printk("\nEFLAGS: %08lx CONTEXT: %s\n", eflags, context);
     printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n",
            regs->eax, regs->ebx, regs->ecx, regs->edx);
     printk("esi: %08x edi: %08x ebp: %08x esp: %08lx\n",
--- a/xen/arch/x86/x86_64/traps.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/xen/arch/x86/x86_64/traps.c	Thu Aug 18 10:40:02 2005 -0800
@@ -17,8 +17,9 @@ void show_registers(struct cpu_user_regs
 {
     printk("CPU: %d\nEIP: %04x:[<%016lx>]",
            smp_processor_id(), 0xffff & regs->cs, regs->rip);
-    print_symbol(" %s\n", regs->rip);
-    printk("EFLAGS: %016lx\n", regs->eflags);
+    if ( !GUEST_MODE(regs) )
+        print_symbol(" %s", regs->rip);
+    printk("\nEFLAGS: %016lx\n", regs->eflags);
     printk("rax: %016lx rbx: %016lx rcx: %016lx rdx: %016lx\n",
            regs->rax, regs->rbx, regs->rcx, regs->rdx);
     printk("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n",
--- a/xen/drivers/char/console.c	Wed Aug 17 12:34:38 2005 -0800
+++ b/xen/drivers/char/console.c	Thu Aug 18 10:40:02 2005 -0800
@@ -652,8 +652,9 @@ static int __init debugtrace_init(void)
 void panic(const char *fmt, ...)
 {
     va_list args;
-    char buf[128], cpustr[10];
+    char buf[128];
     unsigned long flags;
+    static spinlock_t lock = SPIN_LOCK_UNLOCKED;
     extern void machine_restart(char *);
 
     debugtrace_dump();
@@ -665,16 +666,13 @@ void panic(const char *fmt, ...)
         debugger_trap_immediate();
 
     /* Spit out multiline message in one go. */
-    spin_lock_irqsave(&console_lock, flags);
-    __putstr("\n****************************************\n");
-    __putstr("Panic on CPU");
-    sprintf(cpustr, "%d", smp_processor_id());
-    __putstr(cpustr);
-    __putstr(":\n");
-    __putstr(buf);
-    __putstr("****************************************\n\n");
-    __putstr("Reboot in five seconds...\n");
-    spin_unlock_irqrestore(&console_lock, flags);
+    spin_lock_irqsave(&lock, flags);
+    printk("\n****************************************\n");
+    printk("Panic on CPU %d:\n", smp_processor_id());
+    printk(buf);
+    printk("****************************************\n\n");
+    printk("Reboot in five seconds...\n");
+    spin_unlock_irqrestore(&lock, flags);
 
     watchdog_disable();
     mdelay(5000);
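A note on the locking change: printk() in this tree takes console_lock internally (an assumption worth verifying against console.c), so panic() can no longer wrap its output in spin_lock_irqsave(&console_lock, ...) without deadlocking against its own printk calls; the new static lock instead serializes concurrent panics. A user-space analogue of the pattern, with illustrative names:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for printk(): takes its own internal lock, so a caller
 * must not already hold console_lock. */
static pthread_mutex_t console_lock = PTHREAD_MUTEX_INITIALIZER;

static void log_line(const char *s)
{
        pthread_mutex_lock(&console_lock);
        fputs(s, stdout);
        pthread_mutex_unlock(&console_lock);
}

/* A dedicated lock serializes concurrent panics so their banners do
 * not interleave, without holding console_lock across log_line(). */
static void panic_banner(int cpu)
{
        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        char buf[64];

        snprintf(buf, sizeof(buf), "Panic on CPU %d:\n", cpu);
        pthread_mutex_lock(&lock);
        log_line("\n****************************************\n");
        log_line(buf);
        log_line("****************************************\n\n");
        log_line("Reboot in five seconds...\n");
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        panic_banner(0);
        return 0;
}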
--- a/xen/include/asm-x86/uaccess.h	Wed Aug 17 12:34:38 2005 -0800
+++ b/xen/include/asm-x86/uaccess.h	Thu Aug 18 10:40:02 2005 -0800
@@ -125,22 +125,20 @@ extern void __put_user_bad(void);
        __pu_err;                                               \
 })
 
-#define __get_user_nocheck(x,ptr,size)                          \
-({                                                              \
-       long __gu_err, __gu_val;                                 \
-       __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT); \
-       (x) = (__typeof__(*(ptr)))__gu_val;                      \
-       __gu_err;                                                \
+#define __get_user_nocheck(x,ptr,size)                          \
+({                                                              \
+       long __gu_err;                                           \
+       __get_user_size((x),(ptr),(size),__gu_err,-EFAULT);      \
+       __gu_err;                                                \
 })
 
-#define __get_user_check(x,ptr,size)                                    \
-({                                                                      \
-       long __gu_err, __gu_val;                                         \
-       __typeof__(*(ptr)) __user *__gu_addr = (ptr);                    \
-       __get_user_size(__gu_val,__gu_addr,(size),__gu_err,-EFAULT);     \
-       (x) = (__typeof__(*(ptr)))__gu_val;                              \
-       if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;                   \
-       __gu_err;                                                        \
+#define __get_user_check(x,ptr,size)                                    \
+({                                                                      \
+       long __gu_err;                                                   \
+       __typeof__(*(ptr)) __user *__gu_addr = (ptr);                    \
+       __get_user_size((x),__gu_addr,(size),__gu_err,-EFAULT);          \
+       if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;                   \
+       __gu_err;                                                        \
 })
 
 struct __large_struct { unsigned long buf[100]; };
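The rewrite of __get_user_nocheck/__get_user_check drops the long __gu_val intermediate and writes straight into (x). On a 32-bit build, funnelling an 8-byte read (e.g. a PAE PTE) through a long truncates it to 4 bytes; storing into the destination's own type keeps the full width. A compact illustration of the difference (plain C, not the actual macros):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t src = 0x1122334455667788ULL;

        /* Old pattern: read through a 'long' intermediate, then cast
         * out. Where long is 4 bytes, the top half is lost. */
        long intermediate = (long)src;          /* truncates on 32-bit */
        uint64_t out_old = (uint64_t)intermediate;

        /* New pattern: store directly into the destination's own type. */
        uint64_t out_new = src;

        assert(out_new == src);
        if (sizeof(long) == 4)
                assert(out_old != src);         /* the bug the rewrite removes */
        return 0;
}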