ia64/xen-unstable

changeset 9500:e311941eb7be

[IA64] dom0 vp model linux part: import pgalloc.h from linux-2.6.16-rc3

[note: verified same as linux-2.6.16]

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Fri Apr 07 14:02:31 2006 -0600 (2006-04-07)
parents aab421b5ad45
children ebec4edfa8e4
files linux-2.6-xen-sparse/include/asm-ia64/pgalloc.h
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/include/asm-ia64/pgalloc.h	Fri Apr 07 14:02:31 2006 -0600
@@ -0,0 +1,164 @@
+#ifndef _ASM_IA64_PGALLOC_H
+#define _ASM_IA64_PGALLOC_H
+
+/*
+ * This file contains the functions and defines necessary to allocate
+ * page tables.
+ *
+ * This hopefully works with any (fixed) ia-64 page-size, as defined
+ * in <asm/page.h> (currently 8192).
+ *
+ * Copyright (C) 1998-2001 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
+ */
+
+#include <linux/config.h>
+
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/threads.h>
+
+#include <asm/mmu_context.h>
+
+DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
+#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
+DECLARE_PER_CPU(long, __pgtable_quicklist_size);
+#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
+
+static inline long pgtable_quicklist_total_size(void)
+{
+	long ql_size = 0;
+	int cpuid;
+
+	for_each_online_cpu(cpuid) {
+		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
+	}
+	return ql_size;
+}
+
+static inline void *pgtable_quicklist_alloc(void)
+{
+	unsigned long *ret = NULL;
+
+	preempt_disable();
+
+	ret = pgtable_quicklist;
+	if (likely(ret != NULL)) {
+		pgtable_quicklist = (unsigned long *)(*ret);
+		ret[0] = 0;
+		--pgtable_quicklist_size;
+		preempt_enable();
+	} else {
+		preempt_enable();
+		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	}
+
+	return ret;
+}
+
+static inline void pgtable_quicklist_free(void *pgtable_entry)
+{
+#ifdef CONFIG_NUMA
+	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
+
+	if (unlikely(nid != numa_node_id())) {
+		free_page((unsigned long)pgtable_entry);
+		return;
+	}
+#endif
+
+	preempt_disable();
+	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
+	pgtable_quicklist = (unsigned long *)pgtable_entry;
+	++pgtable_quicklist_size;
+	preempt_enable();
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return pgtable_quicklist_alloc();
+}
+
+static inline void pgd_free(pgd_t * pgd)
+{
+	pgtable_quicklist_free(pgd);
+}
+
+#ifdef CONFIG_PGTABLE_4
+static inline void
+pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+{
+	pgd_val(*pgd_entry) = __pa(pud);
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return pgtable_quicklist_alloc();
+}
+
+static inline void pud_free(pud_t * pud)
+{
+	pgtable_quicklist_free(pud);
+}
+#define __pud_free_tlb(tlb, pud)	pud_free(pud)
+#endif /* CONFIG_PGTABLE_4 */
+
+static inline void
+pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+{
+	pud_val(*pud_entry) = __pa(pmd);
+}
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return pgtable_quicklist_alloc();
+}
+
+static inline void pmd_free(pmd_t * pmd)
+{
+	pgtable_quicklist_free(pmd);
+}
+
+#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
+{
+	pmd_val(*pmd_entry) = page_to_phys(pte);
+}
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
+{
+	pmd_val(*pmd_entry) = __pa(pte);
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long addr)
+{
+	return virt_to_page(pgtable_quicklist_alloc());
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long addr)
+{
+	return pgtable_quicklist_alloc();
+}
+
+static inline void pte_free(struct page *pte)
+{
+	pgtable_quicklist_free(page_address(pte));
+}
+
+static inline void pte_free_kernel(pte_t * pte)
+{
+	pgtable_quicklist_free(pte);
+}
+
+#define __pte_free_tlb(tlb, pte)	pte_free(pte)
+
+extern void check_pgt_cache(void);
+
+#endif				/* _ASM_IA64_PGALLOC_H */
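
The imported header recycles page-table pages through a per-CPU quicklist: when a pgd/pud/pmd/pte page is freed, its first word is overwritten with the previous list head, so the free pages themselves form an intrusive singly linked stack. Allocation pops the head, re-zeroes that one link word, and only falls back to __get_free_page(GFP_KERNEL | __GFP_ZERO) when the list is empty. Below is a minimal, single-threaded user-space sketch of that free-list pattern, not the kernel code itself: the names ql_alloc/ql_free are hypothetical, malloc/calloc stand in for the page allocator, and the per-CPU state, preempt_disable() bracketing, and NUMA node check of the real header are deliberately omitted.

/* Hypothetical illustration of the quicklist pattern from pgalloc.h.
 * Assumptions: fixed 8 KB "pages" obtained from calloc(), single thread. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 8192UL

static unsigned long *quicklist;	/* head of the free-page stack */
static long quicklist_size;		/* number of pages currently cached */

static void *ql_alloc(void)
{
	unsigned long *ret = quicklist;

	if (ret != NULL) {
		/* Pop the head: the first word of a cached page holds the
		 * address of the next cached page (or NULL). */
		quicklist = (unsigned long *)(*ret);
		ret[0] = 0;		/* restore the zeroed state */
		--quicklist_size;
		return ret;
	}
	/* Cache empty: hand out a freshly zeroed page. */
	return calloc(1, PAGE_SIZE);
}

static void ql_free(void *page)
{
	/* Push the page: reuse its first word as the link field. */
	*(unsigned long *)page = (unsigned long)quicklist;
	quicklist = page;
	++quicklist_size;
}

int main(void)
{
	void *a = ql_alloc();	/* cache is empty, comes from calloc() */
	ql_free(a);		/* page goes onto the quicklist */
	void *b = ql_alloc();	/* pops the same page straight back */

	printf("recycled: %s, cached pages now %ld\n",
	       a == b ? "yes" : "no", quicklist_size);
	free(b);
	return 0;
}

In the kernel version the list head and its size counter are per-CPU variables, so pgtable_quicklist_alloc()/pgtable_quicklist_free() disable preemption around the pop and push to keep the pointer chase on one CPU, and on NUMA systems pages belonging to a different node bypass the list and are returned directly to the page allocator.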