ia64/xen-unstable

changeset 6583:6fa6c392d258

Linux 2.6 cleanups.
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author cl349@firebug.cl.cam.ac.uk
date Thu Sep 01 08:25:22 2005 +0000 (2005-09-01)
parents 6d4c0bfc3c1c
children aaee260ce02c
files	linux-2.6-xen-sparse/arch/xen/Makefile
	linux-2.6-xen-sparse/arch/xen/i386/Kconfig
	linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile
	linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S
	linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S
	linux-2.6-xen-sparse/arch/xen/i386/kernel/irq.c
	linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
	linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c
	linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
	linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c
	linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
	linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig
	linux-2.6-xen-sparse/drivers/char/mem.c
	linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
	linux-2.6-xen-sparse/include/asm-xen/asm-i386/processor.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/Makefile	Wed Aug 31 10:24:43 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/Makefile	Thu Sep 01 08:25:22 2005 +0000
     1.3 @@ -65,6 +65,7 @@ vmlinuz: vmlinux
     1.4  
     1.5  XINSTALL_NAME ?= $(KERNELRELEASE)
     1.6  install: vmlinuz
     1.7 +install kernel_install:
     1.8  	mkdir -p $(INSTALL_PATH)/boot
     1.9  	ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
    1.10  	rm -f $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/Kconfig	Wed Aug 31 10:24:43 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/Kconfig	Thu Sep 01 08:25:22 2005 +0000
     2.3 @@ -379,18 +379,18 @@ config SMP
     2.4  	  If you don't know what to do here, say N.
     2.5  
     2.6  config SMP_ALTERNATIVES
     2.7 -        bool "SMP alternatives support (EXPERIMENTAL)"
     2.8 -        depends on SMP && EXPERIMENTAL
     2.9 -        help
    2.10 -          Try to reduce the overhead of running an SMP kernel on a uniprocessor
    2.11 -          host slightly by replacing certain key instruction sequences
    2.12 -          according to whether we currently have more than one CPU available.
    2.13 -          This should provide a noticeable boost to performance when
    2.14 -          running SMP kernels on UP machines, and have negligible impact
    2.15 -          when running on an true SMP host.
    2.16 +	bool "SMP alternatives support (EXPERIMENTAL)"
    2.17 +	depends on SMP && EXPERIMENTAL
    2.18 +	help
    2.19 +	  Try to reduce the overhead of running an SMP kernel on a uniprocessor
    2.20 +	  host slightly by replacing certain key instruction sequences
    2.21 +	  according to whether we currently have more than one CPU available.
    2.22 +	  This should provide a noticeable boost to performance when
    2.23 +	  running SMP kernels on UP machines, and have negligible impact
     2.24 +	  when running on a true SMP host.
    2.25  
    2.26            If unsure, say N.
    2.27 -
    2.28 +	  
    2.29  config NR_CPUS
    2.30  	int "Maximum number of CPUs (2-255)"
    2.31  	range 2 255
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile	Wed Aug 31 10:24:43 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile	Thu Sep 01 08:25:22 2005 +0000
     3.3 @@ -5,6 +5,7 @@
     3.4  XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
     3.5  
     3.6  CFLAGS	+= -Iarch/$(XENARCH)/kernel
     3.7 +AFLAGS	+= -Iarch/$(XENARCH)/kernel
     3.8  
     3.9  extra-y := head.o init_task.o
    3.10  
    3.11 @@ -32,7 +33,7 @@ obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
    3.12  obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o
    3.13  c-obj-$(CONFIG_X86_LOCAL_APIC)	+= nmi.o
    3.14  obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
    3.15 -c-obj-$(CONFIG_X86_REBOOTFIXUPS)+= reboot_fixups.o
    3.16 +c-obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups.o
    3.17  c-obj-$(CONFIG_X86_NUMAQ)	+= numaq.o
    3.18  c-obj-$(CONFIG_X86_SUMMIT_NUMA)	+= summit.o
    3.19  c-obj-$(CONFIG_MODULES)		+= module.o
    3.20 @@ -69,7 +70,7 @@ SYSCFLAGS_vsyscall-int80.so	= $(vsyscall
    3.21  
    3.22  $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
    3.23  $(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
    3.24 -		      $(obj)/vsyscall-%.o FORCE
    3.25 +		      $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
    3.26  	$(call if_changed,syscall)
    3.27  
    3.28  # We also create a special relocatable object that should mirror the symbol
    3.29 @@ -81,20 +82,17 @@ extra-y += vsyscall-syms.o
    3.30  
    3.31  SYSCFLAGS_vsyscall-syms.o = -r
    3.32  $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
    3.33 -			$(obj)/vsyscall-sysenter.o FORCE
    3.34 +			$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
    3.35  	$(call if_changed,syscall)
    3.36  
    3.37  c-link	:=
    3.38 -s-link	:= vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o vsyscall.lds.o syscall_table.o
    3.39 +s-link	:= vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o vsyscall.lds.o vsyscall-note.o
    3.40  
    3.41  $(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-obj-m) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
    3.42  	@ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
    3.43  
    3.44  $(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
    3.45  
    3.46 -EXTRA_AFLAGS	+= -I$(obj)
    3.47 -$(obj)/entry.o: $(src)/entry.S $(src)/syscall_table.S
    3.48 -
    3.49  obj-y	+= $(c-obj-y) $(s-obj-y)
    3.50  obj-m	+= $(c-obj-m)
    3.51  
     4.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S	Wed Aug 31 10:24:43 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S	Thu Sep 01 08:25:22 2005 +0000
     4.3 @@ -47,6 +47,7 @@
     4.4  #include <asm/segment.h>
     4.5  #include <asm/smp.h>
     4.6  #include <asm/page.h>
     4.7 +#include <asm/desc.h>
     4.8  #include "irq_vectors.h"
     4.9  #include <asm-xen/xen-public/xen.h>
    4.10  
    4.11 @@ -112,7 +113,7 @@ VM_MASK		= 0x00020000
    4.12  				XEN_BLOCK_EVENTS(%esi)
    4.13  #else
    4.14  #define preempt_stop
    4.15 -#define resume_kernel		restore_all
    4.16 +#define resume_kernel		restore_nocheck
    4.17  #endif
    4.18  
    4.19  #define SAVE_ALL \
    4.20 @@ -161,11 +162,9 @@ 4:	movl $0,(%esp);	\
    4.21  	addl $4, %esp;	\
    4.22  1:	iret;		\
    4.23  .section .fixup,"ax";   \
    4.24 -2:	movl $(__USER_DS), %edx; \
    4.25 -	movl %edx, %ds; \
    4.26 -	movl %edx, %es; \
    4.27 -	movl $11,%eax;	\
    4.28 -	call do_exit;	\
    4.29 +2:	pushl $0;	\
    4.30 +	pushl $do_iret_error;	\
    4.31 +	jmp error_code;	\
    4.32  .previous;		\
    4.33  .section __ex_table,"a";\
    4.34  	.align 4;	\
    4.35 @@ -196,7 +195,7 @@ ret_from_intr:
    4.36  	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
    4.37  	movb CS(%esp), %al
    4.38  	testl $(VM_MASK | 2), %eax
    4.39 -	jz resume_kernel		# returning to kernel or vm86-space
    4.40 +	jz resume_kernel
    4.41  ENTRY(resume_userspace)
    4.42  	XEN_BLOCK_EVENTS(%esi)		# make sure we don't miss an interrupt
    4.43  					# setting need_resched or sigpending
    4.44 @@ -211,7 +210,7 @@ ENTRY(resume_userspace)
    4.45  ENTRY(resume_kernel)
    4.46  	XEN_BLOCK_EVENTS(%esi)
    4.47  	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
    4.48 -	jnz restore_all
    4.49 +	jnz restore_nocheck
    4.50  need_resched:
    4.51  	movl TI_flags(%ebp), %ecx	# need_resched set ?
    4.52  	testb $_TIF_NEED_RESCHED, %cl
    4.53 @@ -252,7 +251,8 @@ 1:	movl (%ebp),%ebp
    4.54  	SAVE_ALL
    4.55  	GET_THREAD_INFO(%ebp)
    4.56  
    4.57 -	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
    4.58 +	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
    4.59 +	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
    4.60  	jnz syscall_trace_entry
    4.61  	cmpl $(nr_syscalls), %eax
    4.62  	jae syscall_badsys
    4.63 @@ -276,7 +276,8 @@ ENTRY(system_call)
    4.64  	SAVE_ALL
    4.65  	GET_THREAD_INFO(%ebp)
    4.66  					# system call tracing in operation
    4.67 -	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
    4.68 +	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
    4.69 +	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
    4.70  	jnz syscall_trace_entry
    4.71  	cmpl $(nr_syscalls), %eax
    4.72  	jae syscall_badsys
    4.73 @@ -290,7 +291,20 @@ syscall_exit:
    4.74  	movl TI_flags(%ebp), %ecx
    4.75  	testw $_TIF_ALLWORK_MASK, %cx	# current->work
    4.76  	jne syscall_exit_work
    4.77 +
    4.78  restore_all:
    4.79 +#if 0 /* XEN */
    4.80 +	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
    4.81 +	# Warning: OLDSS(%esp) contains the wrong/random values if we
    4.82 +	# are returning to the kernel.
    4.83 +	# See comments in process.c:copy_thread() for details.
    4.84 +	movb OLDSS(%esp), %ah
    4.85 +	movb CS(%esp), %al
    4.86 +	andl $(VM_MASK | (4 << 8) | 3), %eax
    4.87 +	cmpl $((4 << 8) | 3), %eax
    4.88 +	je ldt_ss			# returning to user-space with LDT SS
    4.89 +#endif /* XEN */
    4.90 +restore_nocheck:
    4.91  	testl $VM_MASK, EFLAGS(%esp)
    4.92  	jnz resume_vm86
    4.93  	movb EVENT_MASK(%esp), %al
    4.94 @@ -300,7 +314,19 @@ restore_all:
    4.95  	andb $1,%al			# %al == mask & ~saved_mask
    4.96  	jnz restore_all_enable_events	#     != 0 => reenable event delivery
    4.97  	XEN_PUT_VCPU_INFO(%esi)
    4.98 -	RESTORE_ALL
    4.99 +	RESTORE_REGS
   4.100 +	addl $4, %esp
   4.101 +1:	iret
   4.102 +.section .fixup,"ax"
   4.103 +iret_exc:
   4.104 +	pushl $0			# no error code
   4.105 +	pushl $do_iret_error
   4.106 +	jmp error_code
   4.107 +.previous
   4.108 +.section __ex_table,"a"
   4.109 +	.align 4
   4.110 +	.long 1b,iret_exc
   4.111 +.previous
   4.112  
   4.113  resume_vm86:
   4.114  	XEN_UNBLOCK_EVENTS(%esi)
   4.115 @@ -310,6 +336,33 @@ resume_vm86:
   4.116  	int $0x82
   4.117  	ud2
   4.118  
   4.119 +#if 0 /* XEN */
   4.120 +ldt_ss:
   4.121 +	larl OLDSS(%esp), %eax
   4.122 +	jnz restore_nocheck
   4.123 +	testl $0x00400000, %eax		# returning to 32bit stack?
    4.124 +	jnz restore_nocheck		# all right, normal return
   4.125 +	/* If returning to userspace with 16bit stack,
   4.126 +	 * try to fix the higher word of ESP, as the CPU
   4.127 +	 * won't restore it.
   4.128 +	 * This is an "official" bug of all the x86-compatible
   4.129 +	 * CPUs, which we can try to work around to make
   4.130 +	 * dosemu and wine happy. */
   4.131 +	subl $8, %esp		# reserve space for switch16 pointer
   4.132 +	cli
   4.133 +	movl %esp, %eax
   4.134 +	/* Set up the 16bit stack frame with switch32 pointer on top,
   4.135 +	 * and a switch16 pointer on top of the current frame. */
   4.136 +	call setup_x86_bogus_stack
   4.137 +	RESTORE_REGS
   4.138 +	lss 20+4(%esp), %esp	# switch to 16bit stack
   4.139 +1:	iret
   4.140 +.section __ex_table,"a"
   4.141 +	.align 4
   4.142 +	.long 1b,iret_exc
   4.143 +.previous
   4.144 +#endif /* XEN */
   4.145 +
   4.146  	# perform work that needs to be done immediately before resumption
   4.147  	ALIGN
   4.148  work_pending:
   4.149 @@ -385,6 +438,27 @@ syscall_badsys:
   4.150  	jmp resume_userspace
   4.151  
   4.152  #if 0 /* XEN */
   4.153 +#define FIXUP_ESPFIX_STACK \
   4.154 +	movl %esp, %eax; \
   4.155 +	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
   4.156 +	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
   4.157 +	/* copy data from 16bit stack to 32bit stack */ \
   4.158 +	call fixup_x86_bogus_stack; \
   4.159 +	/* put ESP to the proper location */ \
   4.160 +	movl %eax, %esp;
   4.161 +#define UNWIND_ESPFIX_STACK \
   4.162 +	pushl %eax; \
   4.163 +	movl %ss, %eax; \
   4.164 +	/* see if on 16bit stack */ \
   4.165 +	cmpw $__ESPFIX_SS, %ax; \
   4.166 +	jne 28f; \
   4.167 +	movl $__KERNEL_DS, %edx; \
   4.168 +	movl %edx, %ds; \
   4.169 +	movl %edx, %es; \
   4.170 +	/* switch to 32bit stack */ \
   4.171 +	FIXUP_ESPFIX_STACK \
   4.172 +28:	popl %eax;
   4.173 +
   4.174  /*
   4.175   * Build the entry stubs and pointer table with
   4.176   * some assembler magic.
   4.177 @@ -440,7 +514,9 @@ error_code:
   4.178  	pushl %ecx
   4.179  	pushl %ebx
   4.180  	cld
   4.181 -	movl %es, %ecx
   4.182 +	pushl %es
   4.183 +#	UNWIND_ESPFIX_STACK
   4.184 +	popl %ecx
   4.185  	movl ES(%esp), %edi		# get the function address
   4.186  	movl ORIG_EAX(%esp), %edx	# get the error code
   4.187  	movl %eax, ORIG_EAX(%esp)
   4.188 @@ -625,6 +701,11 @@ debug_stack_correct:
   4.189   * fault happened on the sysenter path.
   4.190   */
   4.191  ENTRY(nmi)
   4.192 +	pushl %eax
   4.193 +	movl %ss, %eax
   4.194 +	cmpw $__ESPFIX_SS, %ax
   4.195 +	popl %eax
   4.196 +	je nmi_16bit_stack
   4.197  	cmpl $sysenter_entry,(%esp)
   4.198  	je nmi_stack_fixup
   4.199  	pushl %eax
   4.200 @@ -644,7 +725,7 @@ nmi_stack_correct:
   4.201  	xorl %edx,%edx		# zero error code
   4.202  	movl %esp,%eax		# pt_regs pointer
   4.203  	call do_nmi
   4.204 -	RESTORE_ALL
   4.205 +	jmp restore_all
   4.206  
   4.207  nmi_stack_fixup:
   4.208  	FIX_STACK(12,nmi_stack_correct, 1)
   4.209 @@ -659,6 +740,29 @@ nmi_debug_stack_check:
   4.210  nmi_debug_stack_fixup:
   4.211  	FIX_STACK(24,nmi_stack_correct, 1)
   4.212  	jmp nmi_stack_correct
   4.213 +
   4.214 +nmi_16bit_stack:
   4.215 +	/* create the pointer to lss back */
   4.216 +	pushl %ss
   4.217 +	pushl %esp
   4.218 +	movzwl %sp, %esp
   4.219 +	addw $4, (%esp)
   4.220 +	/* copy the iret frame of 12 bytes */
   4.221 +	.rept 3
   4.222 +	pushl 16(%esp)
   4.223 +	.endr
   4.224 +	pushl %eax
   4.225 +	SAVE_ALL
   4.226 +	FIXUP_ESPFIX_STACK		# %eax == %esp
   4.227 +	xorl %edx,%edx			# zero error code
   4.228 +	call do_nmi
   4.229 +	RESTORE_REGS
   4.230 +	lss 12+4(%esp), %esp		# back to 16bit stack
   4.231 +1:	iret
   4.232 +.section __ex_table,"a"
   4.233 +	.align 4
   4.234 +	.long 1b,iret_exc
   4.235 +.previous
   4.236  #endif /* XEN */
   4.237  
   4.238  ENTRY(int3)
   4.239 @@ -725,7 +829,9 @@ ENTRY(page_fault)
   4.240  	pushl %ecx
   4.241  	pushl %ebx
   4.242  	cld
   4.243 -	movl %es,%edi
   4.244 +	pushl %es
   4.245 +#	UNWIND_ESPFIX_STACK
   4.246 +	popl %edi
   4.247  	movl ES(%esp), %ecx		/* get the faulting address */
   4.248  	movl ORIG_EAX(%esp), %edx	/* get the error code */
   4.249  	movl %eax, ORIG_EAX(%esp)
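
The testb -> testw widening above is the functional core of the entry.S
changes: _TIF_SECCOMP is bit number 8 of the thread-info flags, and a
byte-wide testb can only see bits 0-7, so seccomp'd tasks would have
skipped the syscall-trace slow path entirely. A minimal userspace sketch
of the width argument (the bit number is taken from the comment in the
patch; no other flag values are modelled):

    #include <assert.h>
    #include <stdint.h>

    #define TIF_SECCOMP   8                    /* bit number, per the patch */
    #define _TIF_SECCOMP  (1u << TIF_SECCOMP)

    int main(void)
    {
        uint32_t ti_flags = _TIF_SECCOMP;

        /* what testb sees: only the low byte -- the flag is invisible */
        assert((ti_flags & 0xffu) == 0);

        /* what testw sees: the low 16 bits -- the flag is caught */
        assert((ti_flags & 0xffffu) != 0);
        return 0;
    }
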
     5.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S	Wed Aug 31 10:24:43 2005 +0000
     5.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S	Thu Sep 01 08:25:22 2005 +0000
     5.3 @@ -179,7 +179,7 @@ ENTRY(cpu_gdt_table)
     5.4  	.quad 0x0000000000000000	/* 0xc0 APM CS 16 code (16 bit) */
     5.5  	.quad 0x0000000000000000	/* 0xc8 APM DS    data */
     5.6  
     5.7 -	.quad 0x0000000000000000	/* 0xd0 - unused */
     5.8 +	.quad 0x0000000000000000	/* 0xd0 - ESPFIX 16-bit SS */
     5.9  	.quad 0x0000000000000000	/* 0xd8 - unused */
    5.10  	.quad 0x0000000000000000	/* 0xe0 - unused */
    5.11  	.quad 0x0000000000000000	/* 0xe8 - unused */
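
The GDT slot relabelled above backs the espfix return path added to
entry.S: descriptors are 8 bytes, so the entry at byte offset 0xd0 is
index 26, and a kernel-privilege selector for it is just the byte offset
itself. A sketch of the selector arithmetic (the GDT_ENTRY_ESPFIX_SS
name is assumed from the upstream espfix patch this change mirrors):

    #include <assert.h>

    #define GDT_ENTRY_ESPFIX_SS  26                        /* 0xd0 / 8 */
    #define __ESPFIX_SS          (GDT_ENTRY_ESPFIX_SS * 8) /* RPL 0 */

    int main(void)
    {
        assert(__ESPFIX_SS == 0xd0);   /* matches the slot comment */
        return 0;
    }
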
     6.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/irq.c	Wed Aug 31 10:24:43 2005 +0000
     6.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/irq.c	Thu Sep 01 08:25:22 2005 +0000
     6.3 @@ -242,12 +242,12 @@ skip:
     6.4  	} else if (i == NR_IRQS) {
     6.5  		seq_printf(p, "NMI: ");
     6.6  		for_each_cpu(j)
     6.7 -			seq_printf(p, "%10u ", nmi_count(j));
     6.8 + 			seq_printf(p, "%10u ", nmi_count(j));
     6.9  		seq_putc(p, '\n');
    6.10  #ifdef CONFIG_X86_LOCAL_APIC
    6.11  		seq_printf(p, "LOC: ");
    6.12  		for_each_cpu(j)
    6.13 -			seq_printf(p, "%10u ", per_cpu(irq_stat, j).apic_timer_irqs);
    6.14 +			seq_printf(p, "%10u ", per_cpu(irq_stat,j).apic_timer_irqs);
    6.15  		seq_putc(p, '\n');
    6.16  #endif
    6.17  		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
    6.18 @@ -263,6 +263,7 @@ skip:
    6.19  void fixup_irqs(cpumask_t map)
    6.20  {
    6.21  	unsigned int irq;
    6.22 +	static int warned;
    6.23  
    6.24  	for (irq = 0; irq < NR_IRQS; irq++) {
    6.25  		cpumask_t mask;
    6.26 @@ -276,7 +277,7 @@ void fixup_irqs(cpumask_t map)
    6.27  		}
    6.28  		if (irq_desc[irq].handler->set_affinity)
    6.29  			irq_desc[irq].handler->set_affinity(irq, mask);
    6.30 -		else if (irq_desc[irq].action)
    6.31 +		else if (irq_desc[irq].action && !(warned++))
    6.32  			printk("Cannot set affinity for irq %i\n", irq);
    6.33  	}
    6.34  
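
Besides the whitespace churn, fixup_irqs() now rate-limits its warning:
the function-local static makes "Cannot set affinity" print at most once
rather than once per unmovable IRQ. A sketch of the print-once idiom in
isolation (plain C stand-in, not kernel code):

    #include <stdio.h>

    static void warn_unmovable(int irq)
    {
        static int warned;

        /* warned++ yields the old value: only the first call sees 0 */
        if (!(warned++))
            printf("Cannot set affinity for irq %i\n", irq);
    }

    int main(void)
    {
        warn_unmovable(5);
        warn_unmovable(9);   /* silent: already warned once */
        return 0;
    }
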
     7.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Wed Aug 31 10:24:43 2005 +0000
     7.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c	Thu Sep 01 08:25:22 2005 +0000
     7.3 @@ -467,7 +467,6 @@ int dump_task_regs(struct task_struct *t
     7.4  	return 1;
     7.5  }
     7.6  
     7.7 -
     7.8  /*
     7.9   *	switch_to(x,yn) should switch tasks from x to y.
    7.10   *
     8.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c	Wed Aug 31 10:24:43 2005 +0000
     8.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c	Thu Sep 01 08:25:22 2005 +0000
     8.3 @@ -1078,12 +1078,12 @@ static void __init reserve_ebda_region(v
     8.4  void __init setup_bootmem_allocator(void);
     8.5  static unsigned long __init setup_memory(void)
     8.6  {
     8.7 -
     8.8  	/*
     8.9  	 * partially used pages are not usable - thus
    8.10  	 * we are rounding upwards:
    8.11  	 */
    8.12 - 	min_low_pfn = PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames;
    8.13 + 	min_low_pfn = PFN_UP(__pa(xen_start_info.pt_base)) +
    8.14 +		xen_start_info.nr_pt_frames;
    8.15  
    8.16  	find_max_pfn();
    8.17  
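
The rewrapped line is worth a second look because the arithmetic is
Xen-specific: the lowest page frame the bootmem allocator may hand out
lies just past the start-of-day page tables that Xen builds for the
guest. A sketch of the computation (4 KiB pages assumed; PFN_UP as in
the Linux headers):

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    /* lowest allocatable PFN: frame of pt_base, rounded up, plus the
     * nr_pt_frames page-table frames that follow it */
    unsigned long first_free_pfn(unsigned long pt_base_pa,
                                 unsigned long nr_pt_frames)
    {
        return PFN_UP(pt_base_pa) + nr_pt_frames;
    }
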
     9.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c	Wed Aug 31 10:24:43 2005 +0000
     9.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c	Thu Sep 01 08:25:22 2005 +0000
     9.3 @@ -856,9 +856,6 @@ static int __init do_boot_cpu(int apicid
     9.4  	cpu_gdt_descr[cpu].address = __get_free_page(GFP_KERNEL|__GFP_ZERO);
     9.5  	BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
     9.6  	cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
     9.7 -	printk("GDT: copying %d bytes from %lx to %lx\n",
     9.8 -		cpu_gdt_descr[0].size, cpu_gdt_descr[0].address,
     9.9 -		cpu_gdt_descr[cpu].address); 
    9.10  	memcpy((void *)cpu_gdt_descr[cpu].address,
    9.11  	       (void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size);
    9.12  
    9.13 @@ -1274,6 +1271,7 @@ static void __init smp_boot_cpus(unsigne
    9.14  			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
    9.15  			smp_num_siblings = siblings;
    9.16  		}
    9.17 +
    9.18  		if (c->x86_num_cores > 1) {
    9.19  			for (i = 0; i < NR_CPUS; i++) {
    9.20  				if (!cpu_isset(i, cpu_callout_map))
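
The dropped printk was pure boot noise; the surviving context shows the
per-CPU GDT bring-up it was narrating: each secondary CPU gets a zeroed
page and a byte copy of the boot CPU's GDT. A userspace sketch of that
clone step (stub descriptor type; calloc stands in for
__get_free_page(GFP_KERNEL|__GFP_ZERO)):

    #include <stdlib.h>
    #include <string.h>

    struct gdt_descr_stub {
        unsigned short size;
        unsigned long  address;
    };

    static int clone_boot_gdt(struct gdt_descr_stub *descr, int cpu)
    {
        descr[cpu].address = (unsigned long)calloc(1, 4096);
        if (!descr[cpu].address)
            return -1;
        descr[cpu].size = descr[0].size;
        memcpy((void *)descr[cpu].address,
               (void *)descr[0].address, descr[0].size);
        return 0;
    }
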
    10.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c	Wed Aug 31 10:24:43 2005 +0000
    10.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c	Thu Sep 01 08:25:22 2005 +0000
    10.3 @@ -449,10 +449,10 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", inv
    10.4  DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
    10.5  DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
    10.6  DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
    10.7 -DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
    10.8  #ifdef CONFIG_X86_MCE
    10.9  DO_ERROR(18, SIGBUS, "machine check", machine_check)
   10.10  #endif
   10.11 +DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
   10.12  
   10.13  fastcall void do_general_protection(struct pt_regs * regs, long error_code)
   10.14  {
    11.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Wed Aug 31 10:24:43 2005 +0000
    11.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Thu Sep 01 08:25:22 2005 +0000
    11.3 @@ -19,287 +19,8 @@
    11.4  #include <asm/pgtable.h>
    11.5  #include <asm/pgalloc.h>
    11.6  
    11.7 -#ifndef CONFIG_XEN_PHYSDEV_ACCESS
    11.8 -
    11.9 -void * __ioremap(unsigned long phys_addr, unsigned long size,
   11.10 -		 unsigned long flags)
   11.11 -{
   11.12 -	return NULL;
   11.13 -}
   11.14 -
   11.15 -void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
   11.16 -{
   11.17 -	return NULL;
   11.18 -}
   11.19 -
   11.20 -void iounmap(volatile void __iomem *addr)
   11.21 -{
   11.22 -}
   11.23 -
   11.24 -#ifdef __i386__
   11.25 -
   11.26 -void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
   11.27 -{
   11.28 -	return NULL;
   11.29 -}
   11.30 -
   11.31 -void __init bt_iounmap(void *addr, unsigned long size)
   11.32 -{
   11.33 -}
   11.34 -
   11.35 -#endif /* __i386__ */
   11.36 -
   11.37 -#else
   11.38 -
   11.39 -/*
   11.40 - * Does @address reside within a non-highmem page that is local to this virtual
   11.41 - * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
   11.42 - * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
   11.43 - * why this works.
   11.44 - */
   11.45 -static inline int is_local_lowmem(unsigned long address)
   11.46 -{
   11.47 -	extern unsigned long max_low_pfn;
   11.48 -	unsigned long mfn = address >> PAGE_SHIFT;
   11.49 -	unsigned long pfn = mfn_to_pfn(mfn);
   11.50 -	return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
   11.51 -}
   11.52 -
   11.53 -/*
   11.54 - * Generic mapping function (not visible outside):
   11.55 - */
   11.56 -
   11.57 -/*
   11.58 - * Remap an arbitrary physical address space into the kernel virtual
   11.59 - * address space. Needed when the kernel wants to access high addresses
   11.60 - * directly.
   11.61 - *
   11.62 - * NOTE! We need to allow non-page-aligned mappings too: we will obviously
   11.63 - * have to convert them into an offset in a page-aligned mapping, but the
   11.64 - * caller shouldn't need to know that small detail.
   11.65 - */
   11.66 -void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
   11.67 -{
   11.68 -	void __iomem * addr;
   11.69 -	struct vm_struct * area;
   11.70 -	unsigned long offset, last_addr;
   11.71 -	domid_t domid = DOMID_IO;
   11.72 -
   11.73 -	/* Don't allow wraparound or zero size */
   11.74 -	last_addr = phys_addr + size - 1;
   11.75 -	if (!size || last_addr < phys_addr)
   11.76 -		return NULL;
   11.77 -
   11.78 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST
   11.79 -	/*
   11.80 -	 * Don't remap the low PCI/ISA area, it's always mapped..
   11.81 -	 */
   11.82 -	if (phys_addr >= 0x0 && last_addr < 0x100000)
   11.83 -		return isa_bus_to_virt(phys_addr);
   11.84 -#endif
   11.85 -
   11.86 -	/*
   11.87 -	 * Don't allow anybody to remap normal RAM that we're using..
   11.88 -	 */
   11.89 -	if (is_local_lowmem(phys_addr)) {
   11.90 -		char *t_addr, *t_end;
   11.91 -		struct page *page;
   11.92 -
   11.93 -		t_addr = bus_to_virt(phys_addr);
   11.94 -		t_end = t_addr + (size - 1);
   11.95 -	   
   11.96 -		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
   11.97 -			if(!PageReserved(page))
   11.98 -				return NULL;
   11.99 -
  11.100 -		domid = DOMID_SELF;
  11.101 -	}
  11.102 -
  11.103 -	/*
  11.104 -	 * Mappings have to be page-aligned
  11.105 -	 */
  11.106 -	offset = phys_addr & ~PAGE_MASK;
  11.107 -	phys_addr &= PAGE_MASK;
  11.108 -	size = PAGE_ALIGN(last_addr+1) - phys_addr;
  11.109 -
  11.110 -	/*
  11.111 -	 * Ok, go for it..
  11.112 -	 */
  11.113 -	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
  11.114 -	if (!area)
  11.115 -		return NULL;
  11.116 -	area->phys_addr = phys_addr;
  11.117 -	addr = (void __iomem *) area->addr;
  11.118 -	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
  11.119 -#ifdef __x86_64__
  11.120 -	flags |= _PAGE_USER;
  11.121 -#endif
  11.122 -	if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
  11.123 -				    size, __pgprot(flags), domid)) {
  11.124 -		vunmap((void __force *) addr);
  11.125 -		return NULL;
  11.126 -	}
  11.127 -	return (void __iomem *) (offset + (char __iomem *)addr);
  11.128 -}
  11.129 -
  11.130 -
  11.131 -/**
  11.132 - * ioremap_nocache     -   map bus memory into CPU space
  11.133 - * @offset:    bus address of the memory
  11.134 - * @size:      size of the resource to map
  11.135 - *
  11.136 - * ioremap_nocache performs a platform specific sequence of operations to
  11.137 - * make bus memory CPU accessible via the readb/readw/readl/writeb/
  11.138 - * writew/writel functions and the other mmio helpers. The returned
  11.139 - * address is not guaranteed to be usable directly as a virtual
  11.140 - * address. 
  11.141 - *
  11.142 - * This version of ioremap ensures that the memory is marked uncachable
  11.143 - * on the CPU as well as honouring existing caching rules from things like
  11.144 - * the PCI bus. Note that there are other caches and buffers on many 
  11.145 - * busses. In particular driver authors should read up on PCI writes
  11.146 - *
  11.147 - * It's useful if some control registers are in such an area and
  11.148 - * write combining or read caching is not desirable:
  11.149 - * 
  11.150 - * Must be freed with iounmap.
  11.151 - */
  11.152 -
  11.153 -void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
  11.154 -{
  11.155 -	unsigned long last_addr;
  11.156 -	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
  11.157 -	if (!p) 
  11.158 -		return p; 
  11.159 -
  11.160 -	/* Guaranteed to be > phys_addr, as per __ioremap() */
  11.161 -	last_addr = phys_addr + size - 1;
  11.162 -
  11.163 -	if (is_local_lowmem(last_addr)) { 
  11.164 -		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
  11.165 -		unsigned long npages;
  11.166 -
  11.167 -		phys_addr &= PAGE_MASK;
  11.168 -
  11.169 -		/* This might overflow and become zero.. */
  11.170 -		last_addr = PAGE_ALIGN(last_addr);
  11.171 -
  11.172 -		/* .. but that's ok, because modulo-2**n arithmetic will make
  11.173 -	 	* the page-aligned "last - first" come out right.
  11.174 -	 	*/
  11.175 -		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
  11.176 -
  11.177 -		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
  11.178 -			iounmap(p); 
  11.179 -			p = NULL;
  11.180 -		}
  11.181 -		global_flush_tlb();
  11.182 -	}
  11.183 -
  11.184 -	return p;					
  11.185 -}
  11.186 -
  11.187 -void iounmap(volatile void __iomem *addr)
  11.188 -{
  11.189 -	struct vm_struct *p;
  11.190 -	if ((void __force *) addr <= high_memory) 
  11.191 -		return; 
  11.192 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST
  11.193 -	if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
  11.194 -		return;
  11.195 -#endif
  11.196 -	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
  11.197 -	if (!p) { 
  11.198 -		printk("__iounmap: bad address %p\n", addr);
  11.199 -		return;
  11.200 -	}
  11.201 -
  11.202 -	if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
  11.203 -		/* p->size includes the guard page, but cpa doesn't like that */
  11.204 -		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
  11.205 -				 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
  11.206 -				 PAGE_KERNEL); 				 
  11.207 -		global_flush_tlb();
  11.208 -	} 
  11.209 -	kfree(p); 
  11.210 -}
  11.211 -
  11.212 -#ifdef __i386__
  11.213 -
  11.214 -void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
  11.215 -{
  11.216 -	unsigned long offset, last_addr;
  11.217 -	unsigned int nrpages;
  11.218 -	enum fixed_addresses idx;
  11.219 -
  11.220 -	/* Don't allow wraparound or zero size */
  11.221 -	last_addr = phys_addr + size - 1;
  11.222 -	if (!size || last_addr < phys_addr)
  11.223 -		return NULL;
  11.224 -
  11.225 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST
  11.226 -	/*
  11.227 -	 * Don't remap the low PCI/ISA area, it's always mapped..
  11.228 -	 */
  11.229 -	if (phys_addr >= 0x0 && last_addr < 0x100000)
  11.230 -		return isa_bus_to_virt(phys_addr);
  11.231 -#endif
  11.232 -
  11.233 -	/*
  11.234 -	 * Mappings have to be page-aligned
  11.235 -	 */
  11.236 -	offset = phys_addr & ~PAGE_MASK;
  11.237 -	phys_addr &= PAGE_MASK;
  11.238 -	size = PAGE_ALIGN(last_addr) - phys_addr;
  11.239 -
  11.240 -	/*
  11.241 -	 * Mappings have to fit in the FIX_BTMAP area.
  11.242 -	 */
  11.243 -	nrpages = size >> PAGE_SHIFT;
  11.244 -	if (nrpages > NR_FIX_BTMAPS)
  11.245 -		return NULL;
  11.246 -
  11.247 -	/*
  11.248 -	 * Ok, go for it..
  11.249 -	 */
  11.250 -	idx = FIX_BTMAP_BEGIN;
  11.251 -	while (nrpages > 0) {
  11.252 -		set_fixmap(idx, phys_addr);
  11.253 -		phys_addr += PAGE_SIZE;
  11.254 -		--idx;
  11.255 -		--nrpages;
  11.256 -	}
  11.257 -	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
  11.258 -}
  11.259 -
  11.260 -void __init bt_iounmap(void *addr, unsigned long size)
  11.261 -{
  11.262 -	unsigned long virt_addr;
  11.263 -	unsigned long offset;
  11.264 -	unsigned int nrpages;
  11.265 -	enum fixed_addresses idx;
  11.266 -
  11.267 -	virt_addr = (unsigned long)addr;
  11.268 -	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
  11.269 -		return;
  11.270 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST
  11.271 -	if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
  11.272 -		return;
  11.273 -#endif
  11.274 -	offset = virt_addr & ~PAGE_MASK;
  11.275 -	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
  11.276 -
  11.277 -	idx = FIX_BTMAP_BEGIN;
  11.278 -	while (nrpages > 0) {
  11.279 -		clear_fixmap(idx);
  11.280 -		--idx;
  11.281 -		--nrpages;
  11.282 -	}
  11.283 -}
  11.284 -
  11.285 -#endif /* __i386__ */
  11.286 -
  11.287 -#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
  11.288 +#define ISA_START_ADDRESS	0x0
  11.289 +#define ISA_END_ADDRESS		0x100000
  11.290  
  11.291  /* These hacky macros avoid phys->machine translations. */
  11.292  #define __direct_pte(x) ((pte_t) { (x) } )
  11.293 @@ -413,6 +134,292 @@ int touch_pte_range(struct mm_struct *mm
  11.294  
  11.295  EXPORT_SYMBOL(touch_pte_range);
  11.296  
  11.297 +#ifdef CONFIG_XEN_PHYSDEV_ACCESS
  11.298 +
  11.299 +/*
  11.300 + * Does @address reside within a non-highmem page that is local to this virtual
  11.301 + * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
  11.302 + * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
  11.303 + * why this works.
  11.304 + */
  11.305 +static inline int is_local_lowmem(unsigned long address)
  11.306 +{
  11.307 +	extern unsigned long max_low_pfn;
  11.308 +	unsigned long mfn = address >> PAGE_SHIFT;
  11.309 +	unsigned long pfn = mfn_to_pfn(mfn);
  11.310 +	return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
  11.311 +}
  11.312 +
  11.313 +/*
  11.314 + * Generic mapping function (not visible outside):
  11.315 + */
  11.316 +
  11.317 +/*
  11.318 + * Remap an arbitrary physical address space into the kernel virtual
  11.319 + * address space. Needed when the kernel wants to access high addresses
  11.320 + * directly.
  11.321 + *
  11.322 + * NOTE! We need to allow non-page-aligned mappings too: we will obviously
  11.323 + * have to convert them into an offset in a page-aligned mapping, but the
  11.324 + * caller shouldn't need to know that small detail.
  11.325 + */
  11.326 +void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
  11.327 +{
  11.328 +	void __iomem * addr;
  11.329 +	struct vm_struct * area;
  11.330 +	unsigned long offset, last_addr;
  11.331 +	domid_t domid = DOMID_IO;
  11.332 +
  11.333 +	/* Don't allow wraparound or zero size */
  11.334 +	last_addr = phys_addr + size - 1;
  11.335 +	if (!size || last_addr < phys_addr)
  11.336 +		return NULL;
  11.337 +
  11.338 +	/*
  11.339 +	 * Don't remap the low PCI/ISA area, it's always mapped..
  11.340 +	 */
  11.341 +	if (xen_start_info.flags & SIF_PRIVILEGED &&
  11.342 +	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
  11.343 +		return (void __iomem *) isa_bus_to_virt(phys_addr);
  11.344 +
  11.345 +	/*
  11.346 +	 * Don't allow anybody to remap normal RAM that we're using..
  11.347 +	 */
  11.348 +	if (is_local_lowmem(phys_addr)) {
  11.349 +		char *t_addr, *t_end;
  11.350 +		struct page *page;
  11.351 +
  11.352 +		t_addr = bus_to_virt(phys_addr);
  11.353 +		t_end = t_addr + (size - 1);
  11.354 +	   
  11.355 +		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
  11.356 +			if(!PageReserved(page))
  11.357 +				return NULL;
  11.358 +
  11.359 +		domid = DOMID_SELF;
  11.360 +	}
  11.361 +
  11.362 +	/*
  11.363 +	 * Mappings have to be page-aligned
  11.364 +	 */
  11.365 +	offset = phys_addr & ~PAGE_MASK;
  11.366 +	phys_addr &= PAGE_MASK;
  11.367 +	size = PAGE_ALIGN(last_addr+1) - phys_addr;
  11.368 +
  11.369 +	/*
  11.370 +	 * Ok, go for it..
  11.371 +	 */
  11.372 +	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
  11.373 +	if (!area)
  11.374 +		return NULL;
  11.375 +	area->phys_addr = phys_addr;
  11.376 +	addr = (void __iomem *) area->addr;
  11.377 +	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
  11.378 +#ifdef __x86_64__
  11.379 +	flags |= _PAGE_USER;
  11.380 +#endif
  11.381 +	if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
  11.382 +				    size, __pgprot(flags), domid)) {
  11.383 +		vunmap((void __force *) addr);
  11.384 +		return NULL;
  11.385 +	}
  11.386 +	return (void __iomem *) (offset + (char __iomem *)addr);
  11.387 +}
  11.388 +
  11.389 +
  11.390 +/**
  11.391 + * ioremap_nocache     -   map bus memory into CPU space
  11.392 + * @offset:    bus address of the memory
  11.393 + * @size:      size of the resource to map
  11.394 + *
  11.395 + * ioremap_nocache performs a platform specific sequence of operations to
  11.396 + * make bus memory CPU accessible via the readb/readw/readl/writeb/
  11.397 + * writew/writel functions and the other mmio helpers. The returned
  11.398 + * address is not guaranteed to be usable directly as a virtual
  11.399 + * address. 
  11.400 + *
  11.401 + * This version of ioremap ensures that the memory is marked uncachable
  11.402 + * on the CPU as well as honouring existing caching rules from things like
  11.403 + * the PCI bus. Note that there are other caches and buffers on many 
  11.404 + * busses. In particular driver authors should read up on PCI writes
  11.405 + *
  11.406 + * It's useful if some control registers are in such an area and
  11.407 + * write combining or read caching is not desirable:
  11.408 + * 
  11.409 + * Must be freed with iounmap.
  11.410 + */
  11.411 +
  11.412 +void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
  11.413 +{
  11.414 +	unsigned long last_addr;
  11.415 +	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
  11.416 +	if (!p) 
  11.417 +		return p; 
  11.418 +
  11.419 +	/* Guaranteed to be > phys_addr, as per __ioremap() */
  11.420 +	last_addr = phys_addr + size - 1;
  11.421 +
  11.422 +	if (is_local_lowmem(last_addr)) { 
  11.423 +		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
  11.424 +		unsigned long npages;
  11.425 +
  11.426 +		phys_addr &= PAGE_MASK;
  11.427 +
  11.428 +		/* This might overflow and become zero.. */
  11.429 +		last_addr = PAGE_ALIGN(last_addr);
  11.430 +
  11.431 +		/* .. but that's ok, because modulo-2**n arithmetic will make
  11.432 +	 	* the page-aligned "last - first" come out right.
  11.433 +	 	*/
  11.434 +		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
  11.435 +
  11.436 +		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
  11.437 +			iounmap(p); 
  11.438 +			p = NULL;
  11.439 +		}
  11.440 +		global_flush_tlb();
  11.441 +	}
  11.442 +
  11.443 +	return p;					
  11.444 +}
  11.445 +
  11.446 +void iounmap(volatile void __iomem *addr)
  11.447 +{
  11.448 +	struct vm_struct *p;
  11.449 +	if ((void __force *) addr <= high_memory) 
  11.450 +		return;
  11.451 +
  11.452 +	/*
  11.453 +	 * __ioremap special-cases the PCI/ISA range by not instantiating a
  11.454 +	 * vm_area and by simply returning an address into the kernel mapping
  11.455 +	 * of ISA space.   So handle that here.
  11.456 +	 */
  11.457 +	if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
  11.458 +		return;
  11.459 +
  11.460 +	write_lock(&vmlist_lock);
  11.461 +	p = __remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
  11.462 +	if (!p) { 
  11.463 +		printk("iounmap: bad address %p\n", addr);
  11.464 +		goto out_unlock;
  11.465 +	}
  11.466 +
  11.467 +	if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
  11.468 +		/* p->size includes the guard page, but cpa doesn't like that */
  11.469 +		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
  11.470 +				 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
  11.471 +				 PAGE_KERNEL);
  11.472 +		global_flush_tlb();
  11.473 +	} 
  11.474 +out_unlock:
  11.475 +	write_unlock(&vmlist_lock);
  11.476 +	kfree(p); 
  11.477 +}
  11.478 +
  11.479 +#ifdef __i386__
  11.480 +
  11.481 +void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
  11.482 +{
  11.483 +	unsigned long offset, last_addr;
  11.484 +	unsigned int nrpages;
  11.485 +	enum fixed_addresses idx;
  11.486 +
  11.487 +	/* Don't allow wraparound or zero size */
  11.488 +	last_addr = phys_addr + size - 1;
  11.489 +	if (!size || last_addr < phys_addr)
  11.490 +		return NULL;
  11.491 +
  11.492 +	/*
  11.493 +	 * Don't remap the low PCI/ISA area, it's always mapped..
  11.494 +	 */
  11.495 +	if (xen_start_info.flags & SIF_PRIVILEGED &&
  11.496 +	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
  11.497 +		return isa_bus_to_virt(phys_addr);
  11.498 +
  11.499 +	/*
  11.500 +	 * Mappings have to be page-aligned
  11.501 +	 */
  11.502 +	offset = phys_addr & ~PAGE_MASK;
  11.503 +	phys_addr &= PAGE_MASK;
  11.504 +	size = PAGE_ALIGN(last_addr) - phys_addr;
  11.505 +
  11.506 +	/*
  11.507 +	 * Mappings have to fit in the FIX_BTMAP area.
  11.508 +	 */
  11.509 +	nrpages = size >> PAGE_SHIFT;
  11.510 +	if (nrpages > NR_FIX_BTMAPS)
  11.511 +		return NULL;
  11.512 +
  11.513 +	/*
  11.514 +	 * Ok, go for it..
  11.515 +	 */
  11.516 +	idx = FIX_BTMAP_BEGIN;
  11.517 +	while (nrpages > 0) {
  11.518 +		set_fixmap(idx, phys_addr);
  11.519 +		phys_addr += PAGE_SIZE;
  11.520 +		--idx;
  11.521 +		--nrpages;
  11.522 +	}
  11.523 +	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
  11.524 +}
  11.525 +
  11.526 +void __init bt_iounmap(void *addr, unsigned long size)
  11.527 +{
  11.528 +	unsigned long virt_addr;
  11.529 +	unsigned long offset;
  11.530 +	unsigned int nrpages;
  11.531 +	enum fixed_addresses idx;
  11.532 +
  11.533 +	virt_addr = (unsigned long)addr;
  11.534 +	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
  11.535 +		return;
  11.536 +	if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
  11.537 +		return;
  11.538 +	offset = virt_addr & ~PAGE_MASK;
  11.539 +	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
  11.540 +
  11.541 +	idx = FIX_BTMAP_BEGIN;
  11.542 +	while (nrpages > 0) {
  11.543 +		clear_fixmap(idx);
  11.544 +		--idx;
  11.545 +		--nrpages;
  11.546 +	}
  11.547 +}
  11.548 +
  11.549 +#endif /* __i386__ */
  11.550 +
  11.551 +#else /* CONFIG_XEN_PHYSDEV_ACCESS */
  11.552 +
  11.553 +void __iomem * __ioremap(unsigned long phys_addr, unsigned long size,
  11.554 +			 unsigned long flags)
  11.555 +{
  11.556 +	return NULL;
  11.557 +}
  11.558 +
  11.559 +void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
  11.560 +{
  11.561 +	return NULL;
  11.562 +}
  11.563 +
  11.564 +void iounmap(volatile void __iomem *addr)
  11.565 +{
  11.566 +}
  11.567 +
  11.568 +#ifdef __i386__
  11.569 +
  11.570 +void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
  11.571 +{
  11.572 +	return NULL;
  11.573 +}
  11.574 +
  11.575 +void __init bt_iounmap(void *addr, unsigned long size)
  11.576 +{
  11.577 +}
  11.578 +
  11.579 +#endif /* __i386__ */
  11.580 +
  11.581 +#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
  11.582 +
  11.583  /*
  11.584   * Local variables:
  11.585   *  c-file-style: "linux"
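
The ioremap.c rework does three things at once: the real implementations
move behind the generic pte helpers, the hard-coded 0x0-0x100000 window
gains the ISA_START_ADDRESS/ISA_END_ADDRESS names, and the compile-time
CONFIG_XEN_PRIVILEGED_GUEST ifdef becomes a run-time test of the
start-of-day flags, so one kernel binary serves dom0 and domU alike
(iounmap also now holds vmlist_lock around __remove_vm_area). A sketch
of the predicate inlined into both __ioremap and bt_ioremap (names
follow the patch; SIF_PRIVILEGED's bit value here is a stand-in):

    #define ISA_START_ADDRESS  0x0
    #define ISA_END_ADDRESS    0x100000
    #define SIF_PRIVILEGED     (1u << 0)      /* stand-in bit value */

    struct start_info_stub { unsigned long flags; };

    /* may this guest map the low PCI/ISA hole directly? */
    static int remap_is_low_isa(const struct start_info_stub *si,
                                unsigned long phys_addr,
                                unsigned long last_addr)
    {
        return (si->flags & SIF_PRIVILEGED) &&
               phys_addr >= ISA_START_ADDRESS &&
               last_addr < ISA_END_ADDRESS;
    }
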
    12.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig	Wed Aug 31 10:24:43 2005 +0000
    12.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig	Thu Sep 01 08:25:22 2005 +0000
    12.3 @@ -21,13 +21,13 @@ config X86_64
    12.4  	  classical 32-bit x86 architecture. For details see
    12.5  	  <http://www.x86-64.org/>.
    12.6  
    12.7 +config 64BIT
    12.8 +	def_bool y
    12.9 +
   12.10  config X86
   12.11  	bool
   12.12  	default y
   12.13  
   12.14 -config 64BIT
   12.15 -	def_bool y
   12.16 -
   12.17  config MMU
   12.18  	bool
   12.19  	default y
   12.20 @@ -89,10 +89,11 @@ choice
   12.21  #	  Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
   12.22  
   12.23  config MPSC
   12.24 -       bool "Intel x86-64"
   12.25 +       bool "Intel EM64T"
   12.26         help
   12.27 -	  Optimize for Intel IA32 with 64bit extension CPUs
   12.28 -	  (Prescott/Nocona/Potomac)
   12.29 +	  Optimize for Intel Pentium 4 and Xeon CPUs with Intel
   12.30 +	  Extended Memory 64 Technology(EM64T). For details see
   12.31 +	  <http://www.intel.com/technology/64bitextensions/>.
   12.32  
   12.33  config GENERIC_CPU
   12.34  	bool "Generic-x86-64"
   12.35 @@ -367,7 +368,6 @@ config SECCOMP
   12.36  
   12.37  	  If unsure, say Y. Only embedded should say N here.
   12.38  
   12.39 -
   12.40  endmenu
   12.41  
   12.42  #
    13.1 --- a/linux-2.6-xen-sparse/drivers/char/mem.c	Wed Aug 31 10:24:43 2005 +0000
    13.2 +++ b/linux-2.6-xen-sparse/drivers/char/mem.c	Thu Sep 01 08:25:22 2005 +0000
    13.3 @@ -231,7 +231,7 @@ static ssize_t write_mem(struct file * f
    13.4  }
    13.5  #endif
    13.6  
    13.7 -static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
    13.8 +static int mmap_mem(struct file * file, struct vm_area_struct * vma)
    13.9  {
   13.10  #if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
   13.11  	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
   13.12 @@ -258,7 +258,6 @@ static int mmap_kmem(struct file * file,
   13.13  	return 0;
   13.14  }
   13.15  
   13.16 -#if 0
   13.17  static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
   13.18  {
   13.19          unsigned long long val;
   13.20 @@ -275,7 +274,6 @@ static int mmap_kmem(struct file * file,
   13.21  	vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
   13.22  	return mmap_mem(file, vma);
   13.23  }
   13.24 -#endif
   13.25  
   13.26  extern long vread(char *buf, char *addr, unsigned long count);
   13.27  extern long vwrite(char *buf, char *addr, unsigned long count);
   13.28 @@ -731,7 +729,7 @@ static struct file_operations mem_fops =
   13.29  	.llseek		= memory_lseek,
   13.30  	.read		= read_mem,
   13.31  	.write		= write_mem,
   13.32 -	.mmap		= mmap_kmem,
   13.33 +	.mmap		= mmap_mem,
   13.34  	.open		= open_mem,
   13.35  };
   13.36  #else
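
The mem.c hunks undo an earlier hack: mmap_mem gets its proper name
back, and the previously #if 0'd mmap_kmem returns to service as a thin
wrapper that rewrites a kernel-virtual offset into the physical offset
mmap_mem expects. A userspace sketch of that delegation (stub types;
__pa modelled for a conventional i386 3/1 split):

    #define PAGE_SHIFT   12
    #define PAGE_OFFSET  0xC0000000UL          /* assumed 3/1 split */
    #define __pa_stub(v) ((unsigned long)(v) - PAGE_OFFSET)

    struct vma_stub { unsigned long vm_pgoff; };

    static int mmap_mem_stub(struct vma_stub *vma)
    {
        (void)vma;           /* real code remaps vm_pgoff as physical */
        return 0;
    }

    static int mmap_kmem_stub(struct vma_stub *vma)
    {
        /* the caller's offset is a kernel-virtual address */
        unsigned long val = vma->vm_pgoff << PAGE_SHIFT;

        /* restate it in physical terms, then delegate */
        vma->vm_pgoff = __pa_stub(val) >> PAGE_SHIFT;
        return mmap_mem_stub(vma);
    }
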
    14.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Wed Aug 31 10:24:43 2005 +0000
    14.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Thu Sep 01 08:25:22 2005 +0000
    14.3 @@ -35,9 +35,9 @@ static inline void __prepare_arch_switch
    14.4  	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
    14.5  	 */
    14.6  	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
    14.7 -		: "=m" (*(int *)&current->thread.fs),
    14.8 -		  "=m" (*(int *)&current->thread.gs));
    14.9 -	asm volatile ( "mov %0,%%fs ; mov %0,%%gs"
   14.10 +		: "=m" (current->thread.fs),
   14.11 +		  "=m" (current->thread.gs));
   14.12 +	asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
   14.13  		: : "r" (0) );
   14.14  }
   14.15  
    15.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/processor.h	Wed Aug 31 10:24:43 2005 +0000
    15.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/processor.h	Thu Sep 01 08:25:22 2005 +0000
    15.3 @@ -517,8 +517,8 @@ static inline void load_esp0(struct tss_
    15.4   * This special macro can be used to load a debugging register
    15.5   */
    15.6  #define loaddebug(thread,register) \
    15.7 -	HYPERVISOR_set_debugreg((register),     \
    15.8 -			((thread)->debugreg[register]))
    15.9 +		HYPERVISOR_set_debugreg((register), \
   15.10 +					((thread)->debugreg[register]))
   15.11  
   15.12  /* Forward declaration, a strange C thing */
   15.13  struct task_struct;