ia64/xen-unstable

changeset 8073:001ba14fbb1b

More cleanups to the pmd-shared patch.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Nov 26 11:32:57 2005 +0100 (2005-11-26)
parents b05e1c4bc31b
children 486f4c9e1c22
files linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c patches/linux-2.6.12/pmd-shared.patch
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c	Sat Nov 26 10:43:27 2005 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c	Sat Nov 26 11:32:57 2005 +0100
     1.3 @@ -278,26 +278,22 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
     1.4  	unsigned long flags;
     1.5  
     1.6  	if (PTRS_PER_PMD > 1) {
     1.7 -#ifdef CONFIG_XEN
     1.8  		/* Ensure pgd resides below 4GB. */
     1.9  		int rc = xen_create_contiguous_region(
    1.10  			(unsigned long)pgd, 0, 32);
    1.11  		BUG_ON(rc);
    1.12 -#endif
    1.13  		if (HAVE_SHARED_KERNEL_PMD)
    1.14  			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    1.15 -			       swapper_pg_dir, sizeof(pgd_t));
    1.16 +			       swapper_pg_dir + USER_PTRS_PER_PGD,
    1.17 +			       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    1.18  	} else {
    1.19 -		if (!HAVE_SHARED_KERNEL_PMD)
    1.20 -			spin_lock_irqsave(&pgd_lock, flags);
    1.21 +		spin_lock_irqsave(&pgd_lock, flags);
    1.22  		memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    1.23  		       swapper_pg_dir + USER_PTRS_PER_PGD,
    1.24  		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    1.25  		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
    1.26 -		if (!HAVE_SHARED_KERNEL_PMD) {
    1.27 -			pgd_list_add(pgd);
    1.28 -			spin_unlock_irqrestore(&pgd_lock, flags);
    1.29 -		}
    1.30 +		pgd_list_add(pgd);
    1.31 +		spin_unlock_irqrestore(&pgd_lock, flags);
    1.32  	}
    1.33  }
    1.34  
    1.35 @@ -306,9 +302,6 @@ void pgd_dtor(void *pgd, kmem_cache_t *c
    1.36  {
    1.37  	unsigned long flags; /* can be called from interrupt context */
    1.38  
    1.39 -	if (HAVE_SHARED_KERNEL_PMD)
    1.40 -		return;
    1.41 -
    1.42  	spin_lock_irqsave(&pgd_lock, flags);
    1.43  	pgd_list_del(pgd);
    1.44  	spin_unlock_irqrestore(&pgd_lock, flags);
    1.45 @@ -335,18 +328,24 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
    1.46  
    1.47  	if (!HAVE_SHARED_KERNEL_PMD) {
    1.48  		unsigned long flags;
    1.49 -		pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET);
    1.50 -		pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET);
    1.51 -		pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET);
    1.52 -		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
    1.53 -		++i;
    1.54 -		if (!pmd)
    1.55 -			goto out_oom;
    1.56 +
    1.57 +		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    1.58 +			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
    1.59 +			if (!pmd)
    1.60 +				goto out_oom;
    1.61 +			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
    1.62 +		}
    1.63  
    1.64  		spin_lock_irqsave(&pgd_lock, flags);
    1.65 -		memcpy(pmd, copy_pmd, PAGE_SIZE);
    1.66 -		make_lowmem_page_readonly(pmd);
    1.67 -		set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
    1.68 +		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    1.69 +			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
    1.70 +			pgd_t *kpgd = pgd_offset_k(v);
    1.71 +			pud_t *kpud = pud_offset(kpgd, v);
    1.72 +			pmd_t *kpmd = pmd_offset(kpud, v);
    1.73 +			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
    1.74 +			memcpy(pmd, kpmd, PAGE_SIZE);
    1.75 +			make_lowmem_page_readonly(pmd);
    1.76 +		}
    1.77  		pgd_list_add(pgd);
    1.78  		spin_unlock_irqrestore(&pgd_lock, flags);
    1.79  	}
    1.80 @@ -374,13 +373,15 @@ void pgd_free(pgd_t *pgd)
    1.81  		}
    1.82  		if (!HAVE_SHARED_KERNEL_PMD) {
    1.83  			unsigned long flags;
    1.84 -			pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
    1.85  			spin_lock_irqsave(&pgd_lock, flags);
    1.86  			pgd_list_del(pgd);
    1.87  			spin_unlock_irqrestore(&pgd_lock, flags);
    1.88 -			make_lowmem_page_writable(pmd);
    1.89 -			memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
    1.90 -			kmem_cache_free(pmd_cache, pmd);
    1.91 +			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    1.92 +				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
    1.93 +				make_lowmem_page_writable(pmd);
    1.94 +				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
    1.95 +				kmem_cache_free(pmd_cache, pmd);
    1.96 +			}
    1.97  		}
    1.98  	}
    1.99  	/* in the non-PAE case, free_pgtables() clears user pgd entries */
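
Taken together, the pgtable.c hunks above generalize the !HAVE_SHARED_KERNEL_PMD handling from one private kernel PMD (the one covering PAGE_OFFSET) to one private PMD per kernel PGD slot: pgd_alloc() allocates and populates them under pgd_lock, pgd_ctor() and pgd_dtor() now maintain pgd_list unconditionally, and pgd_free() releases each per-slot PMD. In every path the kernel PGD entry is written as __pgd(1 + __pa(pmd)) and read back as __va(pgd_val(pgd[i]) - 1). A minimal sketch of that encoding, assuming the usual i386 macros (__pa, __va, __pgd, pgd_val) already available from this file's includes; the helper names are illustrative only and are not part of the changeset:

	/* Illustrative only: encode a freshly allocated pmd page as a pgd
	 * entry, i.e. its physical address with bit 0 (the present bit) set,
	 * matching set_pgd(..., __pgd(1 + __pa(pmd))) above. */
	static inline pgd_t pmd_to_pgd_entry(pmd_t *pmd)
	{
		return __pgd(1 + __pa(pmd));
	}

	/* Illustrative only: invert the encoding by clearing the present bit
	 * and mapping the physical address back to lowmem, as the copy and
	 * free loops do with __va(pgd_val(pgd[i]) - 1). */
	static inline pmd_t *pgd_entry_to_pmd(pgd_t e)
	{
		return (pmd_t *)__va(pgd_val(e) - 1);
	}

The encoding works because the pmd_cache allocations are page-aligned lowmem pages, so the low bits of their physical address are free to carry the present flag.
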
     2.1 --- a/patches/linux-2.6.12/pmd-shared.patch	Sat Nov 26 10:43:27 2005 +0100
     2.2 +++ b/patches/linux-2.6.12/pmd-shared.patch	Sat Nov 26 11:32:57 2005 +0100
     2.3 @@ -11,14 +11,20 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pa
     2.4   
     2.5   	spin_lock_irqsave(&pgd_lock, flags);
     2.6  diff -urNpP linux-2.6.12/arch/i386/mm/pgtable.c linux-2.6.12.new/arch/i386/mm/pgtable.c
     2.7 ---- linux-2.6.12/arch/i386/mm/pgtable.c	2005-11-24 21:51:49.000000000 +0000
     2.8 -+++ linux-2.6.12.new/arch/i386/mm/pgtable.c	2005-11-24 22:06:04.000000000 +0000
     2.9 -@@ -199,19 +199,22 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
    2.10 +--- linux-2.6.12/arch/i386/mm/pgtable.c	2005-11-26 09:55:10.000000000 +0000
    2.11 ++++ linux-2.6.12.new/arch/i386/mm/pgtable.c	2005-11-26 10:20:36.000000000 +0000
    2.12 +@@ -199,19 +199,20 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
    2.13   {
    2.14   	unsigned long flags;
    2.15   
    2.16  -	if (PTRS_PER_PMD == 1)
    2.17 --		spin_lock_irqsave(&pgd_lock, flags);
    2.18 ++	if (PTRS_PER_PMD > 1) {
    2.19 ++		if (HAVE_SHARED_KERNEL_PMD)
    2.20 ++			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    2.21 ++			       swapper_pg_dir + USER_PTRS_PER_PGD,
    2.22 ++			       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    2.23 ++	} else {
    2.24 + 		spin_lock_irqsave(&pgd_lock, flags);
    2.25  -
    2.26  -	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    2.27  -			swapper_pg_dir + USER_PTRS_PER_PGD,
    2.28 @@ -30,53 +36,40 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pg
    2.29  -	pgd_list_add(pgd);
    2.30  -	spin_unlock_irqrestore(&pgd_lock, flags);
    2.31  -	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
    2.32 -+	if (PTRS_PER_PMD > 1) {
    2.33 -+		if (HAVE_SHARED_KERNEL_PMD)
    2.34 -+			memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    2.35 -+			       swapper_pg_dir, sizeof(pgd_t));
    2.36 -+	} else {
    2.37 -+		if (!HAVE_SHARED_KERNEL_PMD)
    2.38 -+			spin_lock_irqsave(&pgd_lock, flags);
    2.39  +		memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
    2.40  +		       swapper_pg_dir + USER_PTRS_PER_PGD,
    2.41  +		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    2.42  +		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
    2.43 -+		if (!HAVE_SHARED_KERNEL_PMD) {
    2.44 -+			pgd_list_add(pgd);
    2.45 -+			spin_unlock_irqrestore(&pgd_lock, flags);
    2.46 -+		}
    2.47 ++		pgd_list_add(pgd);
    2.48 ++		spin_unlock_irqrestore(&pgd_lock, flags);
    2.49  +	}
    2.50   }
    2.51   
    2.52   /* never called when PTRS_PER_PMD > 1 */
    2.53 -@@ -219,6 +222,9 @@ void pgd_dtor(void *pgd, kmem_cache_t *c
    2.54 - {
    2.55 - 	unsigned long flags; /* can be called from interrupt context */
    2.56 - 
    2.57 -+	if (HAVE_SHARED_KERNEL_PMD)
    2.58 -+		return;
    2.59 -+
    2.60 - 	spin_lock_irqsave(&pgd_lock, flags);
    2.61 - 	pgd_list_del(pgd);
    2.62 - 	spin_unlock_irqrestore(&pgd_lock, flags);
    2.63 -@@ -238,6 +244,24 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
    2.64 +@@ -238,6 +239,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
    2.65   			goto out_oom;
    2.66   		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    2.67   	}
    2.68  +
    2.69  +	if (!HAVE_SHARED_KERNEL_PMD) {
    2.70  +		unsigned long flags;
    2.71 -+		pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET);
    2.72 -+		pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET);
    2.73 -+		pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET);
    2.74 -+		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
    2.75 -+                ++i;
    2.76 -+		if (!pmd)
    2.77 -+			goto out_oom;
    2.78 ++
    2.79 ++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    2.80 ++			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
    2.81 ++			if (!pmd)
    2.82 ++				goto out_oom;
    2.83 ++			set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
    2.84 ++		}
    2.85  +
    2.86  +		spin_lock_irqsave(&pgd_lock, flags);
    2.87 -+		memcpy(pmd, copy_pmd, PAGE_SIZE);
    2.88 -+		set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
    2.89 ++		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
    2.90 ++			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
    2.91 ++			pgd_t *kpgd = pgd_offset_k(v);
    2.92 ++			pud_t *kpud = pud_offset(kpgd, v);
    2.93 ++			pmd_t *kpmd = pmd_offset(kpud, v);
    2.94 ++			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
    2.95 ++			memcpy(pmd, kpmd, PAGE_SIZE);
    2.96 ++		}
    2.97  +		pgd_list_add(pgd);
    2.98  +		spin_unlock_irqrestore(&pgd_lock, flags);
    2.99  +	}
   2.100 @@ -84,7 +77,7 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pg
   2.101   	return pgd;
   2.102   
   2.103   out_oom:
   2.104 -@@ -252,9 +276,21 @@ void pgd_free(pgd_t *pgd)
   2.105 +@@ -252,9 +277,23 @@ void pgd_free(pgd_t *pgd)
   2.106   	int i;
   2.107   
   2.108   	/* in the PAE case user pgd entries are overwritten before usage */
   2.109 @@ -98,12 +91,14 @@ diff -urNpP linux-2.6.12/arch/i386/mm/pg
   2.110  +		}
   2.111  +		if (!HAVE_SHARED_KERNEL_PMD) {
   2.112  +			unsigned long flags;
   2.113 -+			pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
   2.114  +			spin_lock_irqsave(&pgd_lock, flags);
   2.115  +			pgd_list_del(pgd);
   2.116  +			spin_unlock_irqrestore(&pgd_lock, flags);
   2.117 -+			memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
   2.118 -+			kmem_cache_free(pmd_cache, pmd);
   2.119 ++			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
   2.120 ++				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
   2.121 ++				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
   2.122 ++				kmem_cache_free(pmd_cache, pmd);
   2.123 ++			}
   2.124  +		}
   2.125  +	}
   2.126   	/* in the non-PAE case, free_pgtables() clears user pgd entries */
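
The second file propagates the same restructuring to the pmd-shared.patch carried for vanilla linux-2.6.12, minus the Xen-only pieces (xen_create_contiguous_region() and the make_lowmem_page_readonly()/writable() calls), with the hunk offsets and timestamps refreshed to match. In both copies, the loop that fills the private kernel PMDs derives the virtual address served by slot i as v = i << PGDIR_SHIFT before walking pgd_offset_k()/pud_offset()/pmd_offset(). With PAE on i386 (PGDIR_SHIFT == 30, PTRS_PER_PGD == 4) and the usual 3GB/1GB split (USER_PTRS_PER_PGD == 3), that loop visits a single slot whose base is 0xC0000000, i.e. PAGE_OFFSET, which is exactly the address the replaced single-PMD code looked up directly. A small self-contained demonstration of that arithmetic (user-space, with the constants hard-coded purely for illustration):

	#include <stdio.h>

	/* Illustrative values for i386 PAE with the default 3GB/1GB split;
	 * in the kernel these come from the pgtable headers. */
	#define PGDIR_SHIFT		30
	#define PTRS_PER_PGD		4
	#define USER_PTRS_PER_PGD	3

	int main(void)
	{
		int i;

		/* Each kernel pgd slot maps a 1GB region whose base address is
		 * the slot index shifted by PGDIR_SHIFT -- the value 'v' that
		 * the copy loop feeds to pgd_offset_k()/pud_offset()/
		 * pmd_offset(). */
		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
			printf("pgd[%d] covers the kernel region at 0x%08lx\n",
			       i, v);
		}
		return 0;
	}

With these illustrative values the program prints a single line, for pgd[3] at 0xc0000000.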