direct-io.hg

changeset 4427:3b7f9e76f29a

bitkeeper revision 1.1236.1.187 (424d5d8cR-dhzBJoJYstmA_JbmRm5g)

Batch the cr3 and LDT switches into a single mmuext_op hypercall. This also
avoids one unnecessary TLB flush per context switch.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Apr 01 14:41:16 2005 +0000 (2005-04-01)
parents 5396c1c1d634
children c2f2989b8637
files linux-2.4.29-xen-sparse/include/asm-xen/mmu_context.h linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
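For readers unfamiliar with the mmuext_op interface, the pattern introduced by both hunks below can be summarized as a standalone helper. This is a minimal illustrative sketch, not part of the patch: the helper name switch_base_and_ldt and its arguments are invented for clarity, the cur_pgd bookkeeping is omitted, the field names (cmd, mfn, linear_addr, nr_ents) and the calls (HYPERVISOR_mmuext_op, MMUEXT_NEW_BASEPTR, MMUEXT_SET_LDT, pfn_to_mfn, DOMID_SELF) follow the diff, and the include path is assumed from the asm-xen sparse-tree layout.

/*
 * Illustrative sketch only (not part of the patch): batch a page-table
 * base switch and an optional LDT switch into one mmuext_op hypercall,
 * as switch_mm() does in the hunks below.  This replaces the previous
 * xen_pt_switch()/load_cr3() plus separate load_LDT() calls.
 */
#include <asm-xen/hypervisor.h>	/* assumed path: HYPERVISOR_mmuext_op(), pfn_to_mfn() */

static inline void switch_base_and_ldt(pgd_t *pgd, void *ldt, unsigned int ldt_ents)
{
	struct mmuext_op ops[2], *op = ops;

	/* Always queue the new base page table (the cr3 equivalent). */
	op->cmd = MMUEXT_NEW_BASEPTR;
	op->mfn = pfn_to_mfn(__pa(pgd) >> PAGE_SHIFT);
	op++;

	/* Queue an LDT switch only when a non-default LDT is involved. */
	if (ldt_ents) {
		op->cmd         = MMUEXT_SET_LDT;
		op->linear_addr = (unsigned long)ldt;
		op->nr_ents     = ldt_ents;
		op++;
	}

	/* Single hypercall for both operations; per the commit message,
	 * batching avoids an extra TLB flush per context switch. */
	BUG_ON(HYPERVISOR_mmuext_op(ops, op - ops, NULL, DOMID_SELF));
}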
line diff
     1.1 --- a/linux-2.4.29-xen-sparse/include/asm-xen/mmu_context.h	Fri Apr 01 13:57:45 2005 +0000
     1.2 +++ b/linux-2.4.29-xen-sparse/include/asm-xen/mmu_context.h	Fri Apr 01 14:41:16 2005 +0000
     1.3 @@ -31,17 +31,25 @@ extern pgd_t *cur_pgd;
     1.4  
     1.5  static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
     1.6  {
     1.7 +	struct mmuext_op _op[2], *op = _op;
     1.8  	if (prev != next) {
     1.9  		/* stop flush ipis for the previous mm */
    1.10  		clear_bit(cpu, &prev->cpu_vm_mask);
    1.11  		/* Re-load page tables */
    1.12  		cur_pgd = next->pgd;
    1.13 -		xen_pt_switch(__pa(cur_pgd));
    1.14 +		op->cmd = MMUEXT_NEW_BASEPTR;
    1.15 +		op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
    1.16 +		op++;
    1.17  		/* load_LDT, if either the previous or next thread
    1.18  		 * has a non-default LDT.
    1.19  		 */
    1.20 -		if (next->context.size+prev->context.size)
    1.21 -			load_LDT(&next->context);
    1.22 +		if (next->context.size+prev->context.size) {
    1.23 +			op->cmd = MMUEXT_SET_LDT;
    1.24 +			op->linear_addr = (unsigned long)next->context.ldt;
    1.25 +			op->nr_ents     = next->context.size;
    1.26 +			op++;
    1.27 +		}
    1.28 +		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
    1.29  	}
    1.30  }
    1.31  
     2.1 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Fri Apr 01 13:57:45 2005 +0000
     2.2 +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Fri Apr 01 14:41:16 2005 +0000
     2.3 @@ -46,6 +46,7 @@ static inline void switch_mm(struct mm_s
     2.4  			     struct task_struct *tsk)
     2.5  {
     2.6  	int cpu = smp_processor_id();
     2.7 +	struct mmuext_op _op[2], *op = _op;
     2.8  
     2.9  	if (likely(prev != next)) {
    2.10  		/* stop flush ipis for the previous mm */
    2.11 @@ -56,14 +57,24 @@ static inline void switch_mm(struct mm_s
    2.12  #endif
    2.13  		cpu_set(cpu, next->cpu_vm_mask);
    2.14  
    2.15 -		/* Re-load page tables */
    2.16 -		load_cr3(next->pgd);
    2.17 +		/* Re-load page tables: load_cr3(next->pgd) */
    2.18 +		per_cpu(cur_pgd, cpu) = next->pgd;
    2.19 +		op->cmd = MMUEXT_NEW_BASEPTR;
    2.20 +		op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
    2.21 +		op++;
    2.22  
    2.23  		/*
    2.24  		 * load the LDT, if the LDT is different:
    2.25  		 */
    2.26 -		if (unlikely(prev->context.ldt != next->context.ldt))
    2.27 -			load_LDT_nolock(&next->context, cpu);
    2.28 +		if (unlikely(prev->context.ldt != next->context.ldt)) {
    2.29 +			/* load_LDT_nolock(&next->context, cpu) */
    2.30 +			op->cmd = MMUEXT_SET_LDT;
    2.31 +			op->linear_addr = (unsigned long)next->context.ldt;
    2.32 +			op->nr_ents     = next->context.size;
    2.33 +			op++;
    2.34 +		}
    2.35 +
    2.36 +		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
    2.37  	}
    2.38  #if 0 /* XEN */
    2.39  	else {