direct-io.hg
changeset 4584:7d4cb393933f
bitkeeper revision 1.1332 (42662dafs9ga33FcmQTwHMpRHPe8_Q)
[PATCH] [PATCH 2/2] x86_64 whitespace cleanup in Linux sparse
Eliminate extraneous whitespace changes in x86_64 portion of Linux
sparse patch.
Signed-off-by: Chris Wright <chrisw@osdl.org>
line diff
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/entry.S Wed Apr 20 10:23:43 2005 +0000
@@ -108,7 +108,7 @@ ECF_IN_SYSCALL = (1<<8)
#define preempt_stop
#define retint_kernel retint_restore_args
#endif
-
+

/*
* C code is not supposed to know about undefined top of stack. Every time
@@ -191,7 +191,7 @@ ECF_IN_SYSCALL = (1<<8)
1:
.endm

-/*
+/*
* A newly forked process directly context switches into this.
*/
/* rdi: prev */
@@ -276,7 +276,7 @@ sysret_check:
XEN_UNBLOCK_EVENTS(%r11)
RESTORE_ARGS 0,8,0
SWITCH_TO_USER ECF_IN_SYSCALL
-
+
/* Handle reschedules */
/* edx: work, edi: workmask */
sysret_careful:
@@ -491,11 +491,11 @@ ENTRY(stub_rt_sigreturn)
#else
SAVE_ARGS
leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
-#endif
+#endif
#if 0 /* For Xen we don't need to do this */
testl $3,CS(%rdi)
je 1f
- swapgs
+ swapgs
#endif
1: addl $1,%gs:pda_irqcount # RED-PEN should check preempt count
movq %gs:pda_irqstackptr,%rax
@@ -508,16 +508,16 @@ retint_check:
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
jnz retint_careful
-retint_restore_args:
- RESTORE_ARGS 0,8,0
+retint_restore_args:
+ RESTORE_ARGS 0,8,0
testb $3,8(%rsp) # check CS
jnz user_mode
kernel_mode:
orb $3,1*8(%rsp)
- iretq
+ iretq
user_mode:
SWITCH_TO_USER 0
-
+
/* edi: workmask, edx: work */
retint_careful:
bt $TIF_NEED_RESCHED,%edx
@@ -527,7 +527,7 @@ retint_careful:
/* sti */
pushq %rdi
call schedule
- popq %rdi
+ popq %rdi
XEN_GET_VCPU_INFO(%r11)
XEN_BLOCK_EVENTS(%r11)
GET_THREAD_INFO(%rcx)
@@ -661,7 +661,7 @@ ENTRY(error_entry)
CFI_REL_OFFSET rsp,(RSP-RDI)
CFI_REL_OFFSET rip,(RIP-RDI)
/* rdi slot contains rax, oldrax contains error code */
- cld
+ cld
subq $14*8,%rsp
CFI_ADJUST_CFA_OFFSET (14*8)
movq %rsi,13*8(%rsp)
@@ -717,7 +717,7 @@ error_check_event:
andb $1,%al # %al == mask & ~saved_mask
jnz restore_all_enable_events # != 0 => reenable event delivery
XEN_UNLOCK_VCPU_INFO_SMP(%rsi)
-error_exit:
+error_exit:
RESTORE_REST
/* cli */
GET_THREAD_INFO(%rcx)
@@ -727,7 +727,7 @@ error_exit:
movl $_TIF_WORK_MASK,%edi
andl %edi,%edx
jnz retint_careful
- RESTORE_ARGS 0,8,0
+ RESTORE_ARGS 0,8,0
SWITCH_TO_USER 0
CFI_ENDPROC

@@ -756,7 +756,7 @@ error_kernelspace:
je error_swapgs
jmp error_sti
#endif
-
+
ENTRY(hypervisor_callback)
zeroentry do_hypervisor_callback

@@ -896,7 +896,7 @@ 9: movq $0,(%rsp); \
.long 3b,8b; \
.long 4b,9b; \
.previous
-
+
.section __ex_table,"a"
.align 8
.quad gs_change,bad_gs
@@ -1051,7 +1051,7 @@ ENTRY(debug)
CFI_ENDPROC

#if 0
- /* runs on exception stack */
+ /* runs on exception stack */
ENTRY(nmi)
CFI_STARTPROC
pushq $-1
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head.S Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/head.S Wed Apr 20 10:23:43 2005 +0000
@@ -41,7 +41,7 @@
.code64
ENTRY(_start)
cld
- movq init_rsp(%rip),%rsp
+ movq init_rsp(%rip),%rsp
/* Copy the necessary stuff from xen_start_info structure. */
movq $xen_start_info_union,%rdi
movq $64,%rcx /* sizeof (union xen_start_info_union) / sizeof (long) */
@@ -89,7 +89,7 @@ ENTRY(lgdt_finish)
pushq %rax
lretq
#endif
-
+
ENTRY(stext)
ENTRY(_stext)

@@ -113,7 +113,7 @@ ENTRY(init_level4_pgt)
.org 0x2000
ENTRY(init_level4_user_pgt)
.fill 512,8,0
-
+
/*
* This is used for vsyscall area mapping as we have a different
* level4 page table for user.
@@ -130,7 +130,7 @@ ENTRY(cpu_gdt_table)
.quad 0x008ffa000000ffff /* __KERNEL_COMPAT32_CS */
.quad 0x00affa000000ffff /* __KERNEL_CS */
.quad 0x00cff2000000ffff /* __KERNEL_DS */
-
+
.quad 0x00cffa000000ffff /* __USER32_CS */
.quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
.quad 0x00affa000000ffff /* __USER_CS */
@@ -194,7 +194,7 @@ gdt32_end:
* IRET will check the segment types kkeil 2000/10/28
* Also sysret mandates a special GDT layout
*/
-
+
#if 0
.align L1_CACHE_BYTES
#endif
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ldt.c Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/ldt.c Wed Apr 20 10:23:43 2005 +0000
@@ -27,9 +27,8 @@
#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
static void flush_ldt(void *null)
{
- if (current->active_mm) {
- load_LDT(&current->active_mm->context);
- }
+ if (current->active_mm)
+ load_LDT(&current->active_mm->context);
}
#endif

--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/pci-dma.c Wed Apr 20 10:23:43 2005 +0000
@@ -26,18 +26,18 @@
* the same here.
*/
int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction)
+ int nents, int direction)
{
- int i;
+ int i;

- BUG_ON(direction == DMA_NONE);
- for (i = 0; i < nents; i++ ) {
- struct scatterlist *s = &sg[i];
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
- s->dma_length = s->length;
- }
- return nents;
+ BUG_ON(direction == DMA_NONE);
+ for (i = 0; i < nents; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+ s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ s->dma_length = s->length;
+ }
+ return nents;
}

EXPORT_SYMBOL(dma_map_sg);
@@ -47,15 +47,15 @@ EXPORT_SYMBOL(dma_map_sg);
* pci_unmap_single() above.
*/
void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, int dir)
+ int nents, int dir)
{
- int i;
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
- BUG_ON(s->page == NULL);
- BUG_ON(s->dma_address == 0);
- dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
- }
+ int i;
+ for (i = 0; i < nents; i++) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(s->page == NULL);
+ BUG_ON(s->dma_address == 0);
+ dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
+ }
}

struct dma_coherent_mem {
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c Wed Apr 20 10:23:43 2005 +0000
@@ -121,7 +121,7 @@ void cpu_idle (void)
if (cpu_isset(cpu, cpu_idle_map))
cpu_clear(cpu, cpu_idle_map);
rmb();
-
+
__IRQ_STAT(cpu,idle_timestamp) = jiffies;
xen_idle();
}
@@ -369,7 +369,7 @@ struct task_struct *__switch_to(struct t
struct tss_struct *tss = &per_cpu(init_tss, cpu);
physdev_op_t iopl_op, iobmp_op;
multicall_entry_t _mcl[8], *mcl = _mcl;
-
+
/*
* This is basically '__unlazy_fpu', except that we queue a
* multicall to indicate FPU task switch, rather than
@@ -646,7 +646,7 @@ long do_arch_prctl(struct task_struct *t
if (doit) {
/* set the selector to 0 to not confuse
__switch_to */
- asm volatile("movl %0,%%fs" :: "r" (0));
+ asm volatile("movl %0,%%fs" :: "r" (0));
ret = HYPERVISOR_set_segment_base(SEGBASE_FS, addr);

}
@@ -706,7 +706,7 @@ int dump_task_regs(struct task_struct *t
ptregs.ss &= 0xffff;

elf_core_copy_regs(regs, &ptregs);
-
+
boot_option_idle_override = 1;
return 1;
}
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup.c Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup.c Wed Apr 20 10:23:43 2005 +0000
@@ -85,7 +85,6 @@ unsigned long xen_override_max_pfn;

struct cpuinfo_x86 boot_cpu_data;

-
unsigned long mmu_cr4_features;
EXPORT_SYMBOL_GPL(mmu_cr4_features);

@@ -385,7 +384,7 @@ static __init void parse_cmdline_early (
disable_apic = 1;

if (!memcmp(from, "noapic", 6))
- skip_ioapic_setp = 1;
+ skip_ioapic_setup = 1;

if (!memcmp(from, "apic", 4)) {
skip_ioapic_setup = 0;
@@ -428,7 +427,7 @@ static __init void parse_cmdline_early (
#ifndef CONFIG_DISCONTIGMEM
static void __init contig_initmem_init(void)
{
- unsigned long bootmap_size, bootmap;
+ unsigned long bootmap_size, bootmap;

/*
* partially used pages are not usable - thus
@@ -606,7 +605,7 @@ void __init setup_arch(char **cmdline_p)
unsigned long low_mem_size;
int i, j;
physdev_op_t op;
-
+
#if 0
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
#else
@@ -759,7 +758,7 @@ void __init setup_arch(char **cmdline_p)
}
}
#endif
- paging_init();
+ paging_init();

/* Make sure we have a large enough P->M table. */
if (end_pfn > xen_start_info.nr_pages) {
@@ -785,7 +784,7 @@ void __init setup_arch(char **cmdline_p)
}

#if 0
- check_ioapic();
+ check_ioapic();
#endif

#ifdef CONFIG_ACPI_BOOT
@@ -803,7 +802,7 @@ void __init setup_arch(char **cmdline_p)
get_smp_config();
init_apic_mappings();
#endif
-
+
/* XXX Disable irqdebug until we have a way to avoid interrupt
* conflicts. */
/* noirqdebug_setup(""); */
@@ -835,7 +834,7 @@ void __init setup_arch(char **cmdline_p)
pci_mem_start = low_mem_size;

#ifdef CONFIG_GART_IOMMU
- iommu_hole_init();
+ iommu_hole_init();
#endif

op.cmd = PHYSDEVOP_SET_IOPL;
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup64.c Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/setup64.c Wed Apr 20 10:23:43 2005 +0000
@@ -58,7 +58,7 @@ off Disable
*/
int __init nonx_setup(char *str)
{
- if (!strcmp(str, "on")) {
+ if (!strncmp(str, "on", 2)) {
__supported_pte_mask |= _PAGE_NX;
do_not_nx = 0;
} else if (!strncmp(str, "off", 3)) {
@@ -126,18 +126,18 @@ void pda_init(int cpu)
{
pgd_t *old_level4 = (pgd_t *)xen_start_info.pt_base;
struct x8664_pda *pda = &cpu_pda[cpu];
-
+
/* Setup up data that may be needed in __get_free_pages early */
asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
(unsigned long)(cpu_pda + cpu));
-
- pda->me = pda;
- pda->cpunumber = cpu;
- pda->irqcount = -1;
- pda->kernelstack =
- (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
- pda->active_mm = &init_mm;
+
+ pda->me = pda;
+ pda->cpunumber = cpu;
+ pda->irqcount = -1;
+ pda->kernelstack =
+ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
+ pda->active_mm = &init_mm;
pda->mmu_state = 0;
pda->kernel_mode = 1;

@@ -258,7 +258,7 @@ void __init cpu_init (void)
printk("Initializing CPU#%d\n", cpu);

#if 0
- clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
#endif
/*
* Initialize the per-CPU GDT with the boot GDT,
@@ -278,12 +278,12 @@ void __init cpu_init (void)

#if 0
memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
-
+
#endif
memcpy(me->thread.tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
GDT_ENTRY_TLS_ENTRIES * 8);

- /*
+ /*
* Delete NT
*/

--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/traps.c Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/traps.c Wed Apr 20 10:23:43 2005 +0000
@@ -948,7 +948,7 @@ void __init trap_init(void)
#ifdef CONFIG_IA32_EMULATION
set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
-
+
/*
* Should be a barrier for any external CPU state.
*/
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/init.c Wed Apr 20 10:23:43 2005 +0000
@@ -416,7 +416,7 @@ void __set_fixmap_user (enum fixed_addre


void __set_fixmap_ma (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
-{
+{
unsigned long address = __fix_to_virt(idx);

if (idx >= __end_of_fixed_addresses) {
@@ -464,21 +464,21 @@ unsigned long get_machine_pfn(unsigned l
pte_t *pte = pte_offset_kernel(pmd, addr);

return (pte->pte >> PAGE_SHIFT);
-}
+}


/*
* We start using from start_pfn
*/
static __init void *alloc_low_page(unsigned long *phys)
-{
+{
unsigned long pfn = table_end++;

*phys = (pfn << PAGE_SHIFT);
memset((void *) ((pfn << PAGE_SHIFT) + __START_KERNEL_map), 0, PAGE_SIZE);

return (void *)((pfn << PAGE_SHIFT) + __START_KERNEL_map);
-}
+}

#define PTE_SIZE PAGE_SIZE

@@ -500,11 +500,11 @@ void __init phys_pud_init(pud_t *pud, un
pmd_t *pmd;

paddr = address + i*PUD_SIZE;
- if (paddr >= end) {
+ if (paddr >= end) {
for (; i < PTRS_PER_PUD; i++, pud++)
set_pud(pud, __pud(0));
break;
- }
+ }

pmd = alloc_low_page(&pmd_phys);
make_page_readonly(pmd);
@@ -515,11 +515,11 @@ void __init phys_pud_init(pud_t *pud, un
unsigned long pte_phys;
pte_t *pte, *pte_save;

- if (paddr >= end) {
+ if (paddr >= end) {
for (; j < PTRS_PER_PMD; j++, pmd++)
set_pmd(pmd, __pmd(0));
break;
- }
+ }
pte = alloc_low_page(&pte_phys);
pte_save = pte;
for (k = 0; k < PTRS_PER_PTE; pte++, k++, paddr += PTE_SIZE) {
@@ -550,8 +550,8 @@ static void __init find_early_table_spac
{
unsigned long puds, pmds, ptes;

- puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
- pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;

tables_reserved = round_up(puds*8, PAGE_SIZE) + round_up(pmds * 8, PAGE_SIZE)
@@ -570,12 +570,12 @@ void __init init_memory_mapping(unsigned

Dprintk("init_memory_mapping\n");

- find_early_table_space(end);
+ find_early_table_space(end);

- start = (unsigned long)__va(start);
- end = (unsigned long)__va(end);
+ start = (unsigned long)__va(start);
+ end = (unsigned long)__va(end);

- for (; start < end; start = next) {
+ for (; start < end; start = next) {
unsigned long pud_phys;
pud_t *pud = alloc_low_page(&pud_phys);
make_page_readonly(pud);
@@ -583,9 +583,9 @@ void __init init_memory_mapping(unsigned
next = start + PGDIR_SIZE;
if (next > end)
next = end;
- phys_pud_init(pud, __pa(start), __pa(next));
- set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
- }
+ phys_pud_init(pud, __pa(start), __pa(next));
+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+ }

early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
table_start<<PAGE_SHIFT,
@@ -622,14 +622,14 @@ void __init paging_init(void)
/* unsigned int max_dma; */
/* max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; */
/* if (end_pfn < max_dma) */
- zones_size[ZONE_DMA] = end_pfn;
+ zones_size[ZONE_DMA] = end_pfn;
#if 0
else {
zones_size[ZONE_DMA] = max_dma;
zones_size[ZONE_NORMAL] = end_pfn - max_dma;
- }
+ }
#endif
- free_area_init(zones_size);
+ free_area_init(zones_size);
}

__set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info,
@@ -672,7 +672,7 @@ void __init clear_kernel_mapping(unsigne
pud = pud_offset(pgd, address);
if (pud_none(*pud))
continue;
- pmd = pmd_offset(pud, address);
+ pmd = pmd_offset(pud, address);
if (!pmd || pmd_none(*pmd))
continue;
if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
@@ -774,7 +774,7 @@ void __init mem_init(void)
* the WP-bit has been tested.
*/
#ifndef CONFIG_SMP
- zap_low_mappings();
+ zap_low_mappings();
#endif
}

@@ -836,16 +836,16 @@ void __init reserve_bootmem_generic(unsi

int kern_addr_valid(unsigned long addr)
{
- unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;

if (above != 0 && above != -1UL)
return 0;

- pgd = pgd_offset_k(addr);
+ pgd = pgd_offset_k(addr);
if (pgd_none(*pgd))
return 0;

@@ -853,13 +853,13 @@ int kern_addr_valid(unsigned long addr)
if (pud_none(*pud))
return 0;

- pmd = pmd_offset(pud, addr);
+ pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return 0;
if (pmd_large(*pmd))
return pfn_valid(pmd_pfn(*pmd));

- pte = pte_offset_kernel(pmd, addr);
+ pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
return 0;
return pfn_valid(pte_pfn(*pte));
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/ioremap.c Wed Apr 20 10:23:38 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/mm/ioremap.c Wed Apr 20 10:23:43 2005 +0000
@@ -112,7 +112,7 @@ void __iomem * __ioremap(unsigned long p
*/
if (is_local_lowmem(phys_addr)) {
char *t_addr, *t_end;
- struct page *page;
+ struct page *page;

t_addr = bus_to_virt(phys_addr);
t_end = t_addr + (size - 1);