ia64/xen-unstable

changeset 8085:f37f1c9ec2ec

Merged.
author emellor@leeni.uk.xensource.com
date Sun Nov 27 01:06:44 2005 +0000 (2005-11-27)
parents b67f9f21fd9c bf09a8db5bb4
children 85a1a57320a6
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c	Sun Nov 27 01:06:20 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c	Sun Nov 27 01:06:44 2005 +0000
     1.3 @@ -278,26 +278,22 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
     1.4  	unsigned long flags;
     1.5  
     1.6  	if (PTRS_PER_PMD > 1) {
     1.7 -#ifdef CONFIG_XEN
     1.8  		/* Ensure pgd resides below 4GB. */
     1.9  		int rc = xen_create_contiguous_region(
    1.10  			(unsigned long)pgd, 0, 32);
    1.11  		BUG_ON(rc);
    1.12 -#endif
    1.13  		if (HAVE_SHARED_KERNEL_PMD)
    1.14  			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    1.15 -			       swapper_pg_dir, sizeof(pgd_t));
    1.16 +			       swapper_pg_dir + USER_PTRS_PER_PGD,
    1.17 +			       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    1.18  	} else {
    1.19 -		if (!HAVE_SHARED_KERNEL_PMD)
    1.20 -			spin_lock_irqsave(&pgd_lock, flags);
    1.21 +		spin_lock_irqsave(&pgd_lock, flags);
    1.22  		memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    1.23  		       swapper_pg_dir + USER_PTRS_PER_PGD,
    1.24  		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    1.25  		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
    1.26 -		if (!HAVE_SHARED_KERNEL_PMD) {
    1.27 -			pgd_list_add(pgd);
    1.28 -			spin_unlock_irqrestore(&pgd_lock, flags);
    1.29 -		}
    1.30 +		pgd_list_add(pgd);
    1.31 +		spin_unlock_irqrestore(&pgd_lock, flags);
    1.32  	}
    1.33  }
    1.34  
    1.35 @@ -306,9 +302,6 @@ void pgd_dtor(void *pgd, kmem_cache_t *c
    1.36  {
    1.37  	unsigned long flags; /* can be called from interrupt context */
    1.38  
    1.39 -	if (HAVE_SHARED_KERNEL_PMD)
    1.40 -		return;
    1.41 -
    1.42  	spin_lock_irqsave(&pgd_lock, flags);
    1.43  	pgd_list_del(pgd);
    1.44  	spin_unlock_irqrestore(&pgd_lock, flags);
    1.45 @@ -335,18 +328,24 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
    1.46  
    1.47  	if (!HAVE_SHARED_KERNEL_PMD) {
    1.48  		unsigned long flags;
    1.49 -		pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET);
    1.50 -		pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET);
    1.51 -		pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET);
    1.52 -		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
    1.53 -		++i;
    1.54 -		if (!pmd)
    1.55 -			goto out_oom;
    1.56 +
    1.57 +		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    1.58 +			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
    1.59 +			if (!pmd)
    1.60 +				goto out_oom;
    1.61 +			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
    1.62 +		}
    1.63  
    1.64  		spin_lock_irqsave(&pgd_lock, flags);
    1.65 -		memcpy(pmd, copy_pmd, PAGE_SIZE);
    1.66 -		make_lowmem_page_readonly(pmd);
    1.67 -		set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
    1.68 +		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    1.69 +			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
    1.70 +			pgd_t *kpgd = pgd_offset_k(v);
    1.71 +			pud_t *kpud = pud_offset(kpgd, v);
    1.72 +			pmd_t *kpmd = pmd_offset(kpud, v);
    1.73 +			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
    1.74 +			memcpy(pmd, kpmd, PAGE_SIZE);
    1.75 +			make_lowmem_page_readonly(pmd);
    1.76 +		}
    1.77  		pgd_list_add(pgd);
    1.78  		spin_unlock_irqrestore(&pgd_lock, flags);
    1.79  	}
    1.80 @@ -374,13 +373,15 @@ void pgd_free(pgd_t *pgd)
    1.81  		}
    1.82  		if (!HAVE_SHARED_KERNEL_PMD) {
    1.83  			unsigned long flags;
    1.84 -			pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
    1.85  			spin_lock_irqsave(&pgd_lock, flags);
    1.86  			pgd_list_del(pgd);
    1.87  			spin_unlock_irqrestore(&pgd_lock, flags);
    1.88 -			make_lowmem_page_writable(pmd);
    1.89 -			memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
    1.90 -			kmem_cache_free(pmd_cache, pmd);
    1.91 +			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    1.92 +				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
    1.93 +				make_lowmem_page_writable(pmd);
    1.94 +				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
    1.95 +				kmem_cache_free(pmd_cache, pmd);
    1.96 +			}
    1.97  		}
    1.98  	}
    1.99  	/* in the non-PAE case, free_pgtables() clears user pgd entries */
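The pgtable.c hunks above replace the old single-slot handling (copying only the first kernel pgd entry and allocating one unshared pmd for PAGE_OFFSET) with loops over every kernel slot from USER_PTRS_PER_PGD to PTRS_PER_PGD. The expression (unsigned long)i << PGDIR_SHIFT recovers the kernel virtual address covered by slot i, which is then walked with pgd_offset_k()/pud_offset()/pmd_offset() to locate the kernel pmd to copy. A standalone sketch of that index arithmetic, assuming the default i386 PAE constants (3G/1G split; these values are assumptions, not taken from this changeset):

    #include <stdio.h>

    int main(void)
    {
        /* Assumed i386 PAE values, not part of this changeset. */
        const unsigned int PGDIR_SHIFT = 30;
        const unsigned int USER_PTRS_PER_PGD = 3;
        const unsigned int PTRS_PER_PGD = 4;
        unsigned int i;

        /* Slot 3 -> 3 << 30 = 0xc0000000 = PAGE_OFFSET, the address the old
         * code hardcoded; the loop in the reworked pgd_alloc() generalizes
         * this to every kernel slot. */
        for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
            printf("pgd slot %u -> kernel va 0x%08lx\n",
                   i, (unsigned long)i << PGDIR_SHIFT);
        return 0;
    }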
     2.1 --- a/patches/linux-2.6.12/pmd-shared.patch	Sun Nov 27 01:06:20 2005 +0000
     2.2 +++ b/patches/linux-2.6.12/pmd-shared.patch	Sun Nov 27 01:06:44 2005 +0000
     2.3 @@ -11,14 +11,20 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pa
     2.4   
     2.5   	spin_lock_irqsave(&pgd_lock, flags);
     2.6  diff -urNpP linux-2.6.12/arch/i386/mm/pgtable.c linux-2.6.12.new/arch/i386/mm/pgtable.c
     2.7 ---- linux-2.6.12/arch/i386/mm/pgtable.c	2005-11-24 21:51:49.000000000 +0000
     2.8 -+++ linux-2.6.12.new/arch/i386/mm/pgtable.c	2005-11-24 22:06:04.000000000 +0000
     2.9 -@@ -199,19 +199,22 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
    2.10 +--- linux-2.6.12/arch/i386/mm/pgtable.c	2005-11-26 09:55:10.000000000 +0000
    2.11 ++++ linux-2.6.12.new/arch/i386/mm/pgtable.c	2005-11-26 10:20:36.000000000 +0000
    2.12 +@@ -199,19 +199,20 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
    2.13   {
    2.14   	unsigned long flags;
    2.15   
    2.16  -	if (PTRS_PER_PMD == 1)
    2.17 --		spin_lock_irqsave(&pgd_lock, flags);
    2.18 ++	if (PTRS_PER_PMD > 1) {
    2.19 ++		if (HAVE_SHARED_KERNEL_PMD)
    2.20 ++			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    2.21 ++			       swapper_pg_dir + USER_PTRS_PER_PGD,
    2.22 ++			       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    2.23 ++	} else {
    2.24 + 		spin_lock_irqsave(&pgd_lock, flags);
    2.25  -
    2.26  -	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    2.27  -			swapper_pg_dir + USER_PTRS_PER_PGD,
    2.28 @@ -30,53 +36,40 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pg
    2.29  -	pgd_list_add(pgd);
    2.30  -	spin_unlock_irqrestore(&pgd_lock, flags);
    2.31  -	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
    2.32 -+	if (PTRS_PER_PMD > 1) {
    2.33 -+		if (HAVE_SHARED_KERNEL_PMD)
    2.34 -+			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    2.35 -+			       swapper_pg_dir, sizeof(pgd_t));
    2.36 -+	} else {
    2.37 -+		if (!HAVE_SHARED_KERNEL_PMD)
    2.38 -+			spin_lock_irqsave(&pgd_lock, flags);
    2.39  +		memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    2.40  +		       swapper_pg_dir + USER_PTRS_PER_PGD,
    2.41  +		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    2.42  +		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
    2.43 -+		if (!HAVE_SHARED_KERNEL_PMD) {
    2.44 -+			pgd_list_add(pgd);
    2.45 -+			spin_unlock_irqrestore(&pgd_lock, flags);
    2.46 -+		}
    2.47 ++		pgd_list_add(pgd);
    2.48 ++		spin_unlock_irqrestore(&pgd_lock, flags);
    2.49  +	}
    2.50   }
    2.51   
    2.52   /* never called when PTRS_PER_PMD > 1 */
    2.53 -@@ -219,6 +222,9 @@ void pgd_dtor(void *pgd, kmem_cache_t *c
    2.54 - {
    2.55 - 	unsigned long flags; /* can be called from interrupt context */
    2.56 - 
    2.57 -+	if (HAVE_SHARED_KERNEL_PMD)
    2.58 -+		return;
    2.59 -+
    2.60 - 	spin_lock_irqsave(&pgd_lock, flags);
    2.61 - 	pgd_list_del(pgd);
    2.62 - 	spin_unlock_irqrestore(&pgd_lock, flags);
    2.63 -@@ -238,6 +244,24 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
    2.64 +@@ -238,6 +239,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
    2.65   			goto out_oom;
    2.66   		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    2.67   	}
    2.68  +
    2.69  +	if (!HAVE_SHARED_KERNEL_PMD) {
    2.70  +		unsigned long flags;
    2.71 -+		pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET);
    2.72 -+		pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET);
    2.73 -+		pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET);
    2.74 -+		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
    2.75 -+                ++i;
    2.76 -+		if (!pmd)
    2.77 -+			goto out_oom;
    2.78 ++
    2.79 ++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    2.80 ++			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
    2.81 ++			if (!pmd)
    2.82 ++				goto out_oom;
    2.83 ++			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
    2.84 ++		}
    2.85  +
    2.86  +		spin_lock_irqsave(&pgd_lock, flags);
    2.87 -+		memcpy(pmd, copy_pmd, PAGE_SIZE);
    2.88 -+		set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
    2.89 ++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    2.90 ++			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
    2.91 ++			pgd_t *kpgd = pgd_offset_k(v);
    2.92 ++			pud_t *kpud = pud_offset(kpgd, v);
    2.93 ++			pmd_t *kpmd = pmd_offset(kpud, v);
    2.94 ++			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
    2.95 ++			memcpy(pmd, kpmd, PAGE_SIZE);
    2.96 ++		}
    2.97  +		pgd_list_add(pgd);
    2.98  +		spin_unlock_irqrestore(&pgd_lock, flags);
    2.99  +	}
   2.100 @@ -84,7 +77,7 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pg
   2.101   	return pgd;
   2.102   
   2.103   out_oom:
   2.104 -@@ -252,9 +276,21 @@ void pgd_free(pgd_t *pgd)
   2.105 +@@ -252,9 +277,23 @@ void pgd_free(pgd_t *pgd)
   2.106   	int i;
   2.107   
   2.108   	/* in the PAE case user pgd entries are overwritten before usage */
   2.109 @@ -98,12 +91,14 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pg
   2.110  +		}
   2.111  +		if (!HAVE_SHARED_KERNEL_PMD) {
   2.112  +			unsigned long flags;
   2.113 -+			pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
   2.114  +			spin_lock_irqsave(&pgd_lock, flags);
   2.115  +			pgd_list_del(pgd);
   2.116  +			spin_unlock_irqrestore(&pgd_lock, flags);
   2.117 -+			memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
   2.118 -+			kmem_cache_free(pmd_cache, pmd);
   2.119 ++			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   2.120 ++				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   2.121 ++				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
   2.122 ++				kmem_cache_free(pmd_cache, pmd);
   2.123 ++			}
   2.124  +		}
   2.125  +	}
   2.126   	/* in the non-PAE case, free_pgtables() clears user pgd entries */
     3.1 --- a/tools/libxc/xc_linux_save.c	Sun Nov 27 01:06:20 2005 +0000
     3.2 +++ b/tools/libxc/xc_linux_save.c	Sun Nov 27 01:06:44 2005 +0000
     3.3 @@ -457,6 +457,15 @@ void canonicalize_pagetable(unsigned lon
     3.4              xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff; 
     3.5      }
     3.6  
     3.7 +    if (pt_levels == 4 && type == L4TAB) { 
     3.8 +        /*
     3.9 +        ** XXX SMH: should compute these from hvirt_start (which we have) 
    3.10 +        ** and hvirt_end (which we don't) 
    3.11 +        */
    3.12 +        xen_start = 256; 
    3.13 +        xen_end   = 272; 
    3.14 +    }
    3.15 +
    3.16      /* Now iterate through the page table, canonicalizing each PTE */
    3.17      for (i = 0; i < pte_last; i++ ) {
    3.18  
    3.19 @@ -721,12 +730,6 @@ int xc_linux_save(int xc_handle, int io_
    3.20      }
    3.21  
    3.22      /* Domain is still running at this point */
    3.23 -
    3.24 -    if (live && (pt_levels == 4)) {
    3.25 -        ERR("Live migration not supported for 64-bit guests");
    3.26 -        live = 0;
    3.27 -    }
    3.28 -
    3.29      if (live) {
    3.30  
    3.31          if (xc_shadow_control(xc_handle, dom, 
    3.32 @@ -811,7 +814,7 @@ int xc_linux_save(int xc_handle, int io_
    3.33          for (i = 0; i < max_pfn; i++) {
    3.34  
    3.35              mfn = live_p2m[i];
    3.36 -            if((mfn != 0xffffffffUL) && (mfn_to_pfn(mfn) != i)) { 
    3.37 +            if((mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i)) { 
    3.38                  DPRINTF("i=0x%x mfn=%lx live_m2p=%lx\n", i, 
    3.39                          mfn, mfn_to_pfn(mfn));
    3.40                  err++;
     4.1 --- a/tools/python/xen/xend/XendCheckpoint.py	Sun Nov 27 01:06:20 2005 +0000
     4.2 +++ b/tools/python/xen/xend/XendCheckpoint.py	Sun Nov 27 01:06:44 2005 +0000
     4.3 @@ -128,7 +128,7 @@ def restore(xd, fd):
     4.4      try:
     4.5          l = read_exact(fd, sizeof_unsigned_long,
     4.6                         "not a valid guest state file: pfn count read")
     4.7 -        nr_pfns = unpack("=L", l)[0]   # XXX endianess
     4.8 +        nr_pfns = unpack("L", l)[0]    # native sizeof long
     4.9          if nr_pfns > 16*1024*1024:     # XXX 
    4.10              raise XendError(
    4.11                  "not a valid guest state file: pfn count out of range")
     5.1 --- a/xen/arch/x86/x86_64/mm.c	Sun Nov 27 01:06:20 2005 +0000
     5.2 +++ b/xen/arch/x86/x86_64/mm.c	Sun Nov 27 01:06:44 2005 +0000
     5.3 @@ -190,7 +190,9 @@ long arch_memory_op(int op, void *arg)
     5.4          if ( copy_from_user(&xmml, arg, sizeof(xmml)) )
     5.5              return -EFAULT;
     5.6  
     5.7 -        for ( v = RDWR_MPT_VIRT_START; v != RDWR_MPT_VIRT_END; v += 1 << 21 )
     5.8 +        for ( i = 0, v = RDWR_MPT_VIRT_START;
     5.9 +              (i != xmml.max_extents) && (v != RDWR_MPT_VIRT_END);
    5.10 +              i++, v += 1 << 21 )
    5.11          {
    5.12              l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
    5.13                  l3_table_offset(v)];
    5.14 @@ -200,11 +202,8 @@ long arch_memory_op(int op, void *arg)
    5.15              if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
    5.16                  break;
    5.17              mfn = l2e_get_pfn(l2e) + l1_table_offset(v);
    5.18 -            if ( i == xmml.max_extents )
    5.19 -                break;
    5.20              if ( put_user(mfn, &xmml.extent_start[i]) )
    5.21                  return -EFAULT;
    5.22 -            i++;
    5.23          }
    5.24  
    5.25          if ( put_user(i, &((struct xen_machphys_mfn_list *)arg)->nr_extents) )
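The mm.c hunk moves the max_extents check into the loop condition instead of testing it after the mfn has already been computed; the copy-out can then never index past the caller's extent_start array, and the count written back via nr_extents is exactly the number of entries stored. The general shape of such a bounded copy-out loop, as a simplified standalone stand-in (hypothetical helper, not the hypervisor code):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for the arch_memory_op() loop: the extent limit is
     * part of the loop condition, so every store lands inside the caller's
     * array and the returned count never exceeds max_extents. */
    static size_t copy_out_bounded(unsigned long *extent_start, size_t max_extents,
                                   unsigned long start, unsigned long end,
                                   unsigned long step)
    {
        size_t i;
        unsigned long v;

        for ( i = 0, v = start; (i != max_extents) && (v != end); i++, v += step )
            extent_start[i] = v;   /* stands in for put_user(mfn, &extent_start[i]) */

        return i;                  /* reported back as nr_extents */
    }

    int main(void)
    {
        unsigned long buf[4];
        size_t n = copy_out_bounded(buf, 4, 0, 1UL << 24, 1UL << 21);
        printf("wrote %zu extents\n", n);  /* 4: capped by max_extents, not by end */
        return 0;
    }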