ia64/xen-unstable

changeset 9756:14a34d811e81

[IA64] introduce P2M conversion

Introduce the P2M conversion functions necessary for the dom0vp model.
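
Here "P2M" is the per-domain metaphysical (guest-physical) to machine
address translation, kept in a three-level page table hanging off
d->arch.mm.  As a purely illustrative sketch of the new interface (the
wrapper below is not part of this patch, and __lookup_domain_mpa() is
only built when CONFIG_XEN_IA64_DOM0_VP is defined):

    /* Hypothetical wrapper: metaphysical -> machine address, mirroring
     * what translate_domain_mpaddr() does with its lookup result. */
    static unsigned long
    maddr_of(struct domain *d, unsigned long mpaddr)
    {
        /* machine address bits of the backing page, or INVALID_MFN */
        unsigned long machine = __lookup_domain_mpa(d, mpaddr);
        if (machine == INVALID_MFN)
            return INVALID_MFN;    /* no P2M entry for this address */
        /* keep the offset within the page from the original address */
        return machine | (mpaddr & ~PAGE_MASK);
    }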

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@ldap.hp.com
date Tue Apr 25 13:06:57 2006 -0600 (2006-04-25)
parents 259ba45ed77d
children bb99a6e5456a
files xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/hyperprivop.S xen/arch/ia64/xen/process.c xen/arch/ia64/xen/vcpu.c xen/arch/ia64/xen/xenmisc.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/mm.h xen/include/asm-ia64/vcpu.h xen/include/asm-ia64/xenprocessor.h
line diff
     1.1 --- a/xen/arch/ia64/xen/domain.c	Tue Apr 25 10:54:45 2006 -0700
     1.2 +++ b/xen/arch/ia64/xen/domain.c	Tue Apr 25 13:06:57 2006 -0600
     1.3 @@ -54,7 +54,9 @@
     1.4  #include <asm/regionreg.h>
     1.5  #include <asm/dom_fw.h>
     1.6  
     1.7 +#ifndef CONFIG_XEN_IA64_DOM0_VP
     1.8  #define CONFIG_DOMAIN0_CONTIGUOUS
     1.9 +#endif
    1.10  unsigned long dom0_start = -1L;
    1.11  unsigned long dom0_size = 512*1024*1024;
    1.12  unsigned long dom0_align = 64*1024*1024;
    1.13 @@ -503,99 +505,291 @@ void new_thread(struct vcpu *v,
    1.14  	}
    1.15  }
    1.16  
    1.17 +static pte_t*
    1.18 +lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
    1.19 +{
    1.20 +    struct page_info *pt;
    1.21 +    struct mm_struct *mm = d->arch.mm;
    1.22 +    pgd_t *pgd;
    1.23 +    pud_t *pud;
    1.24 +    pmd_t *pmd;
    1.25 +
    1.26 +    BUG_ON(mm->pgd == NULL);
    1.27 +    pgd = pgd_offset(mm, mpaddr);
    1.28 +    if (pgd_none(*pgd)) {
    1.29 +        pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
    1.30 +        pt = maddr_to_page(pgd_val(*pgd));
    1.31 +        list_add_tail(&pt->list, &d->arch.mm->pt_list);
    1.32 +    }
    1.33 +
    1.34 +    pud = pud_offset(pgd, mpaddr);
    1.35 +    if (pud_none(*pud)) {
    1.36 +        pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
    1.37 +        pt = maddr_to_page(pud_val(*pud));
    1.38 +        list_add_tail(&pt->list, &d->arch.mm->pt_list);
    1.39 +    }
    1.40 +
    1.41 +    pmd = pmd_offset(pud, mpaddr);
    1.42 +    if (pmd_none(*pmd)) {
    1.43 +        pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm, mpaddr));
    1.44 +        pt = maddr_to_page(pmd_val(*pmd));
    1.45 +        list_add_tail(&pt->list, &d->arch.mm->pt_list);
    1.46 +    }
    1.47 +
    1.48 +    return pte_offset_map(pmd, mpaddr);
    1.49 +}
    1.50 +
     1.51 +//XXX xxx_none() should be used instead of !xxx_present()?
    1.52 +static pte_t*
    1.53 +lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr)
    1.54 +{
    1.55 +    struct mm_struct *mm = d->arch.mm;
    1.56 +    pgd_t *pgd;
    1.57 +    pud_t *pud;
    1.58 +    pmd_t *pmd;
    1.59 +
    1.60 +    BUG_ON(mm->pgd == NULL);
    1.61 +    pgd = pgd_offset(mm, mpaddr);
    1.62 +    if (!pgd_present(*pgd))
    1.63 +        goto not_present;
    1.64 +
    1.65 +    pud = pud_offset(pgd, mpaddr);
    1.66 +    if (!pud_present(*pud))
    1.67 +        goto not_present;
    1.68 +
    1.69 +    pmd = pmd_offset(pud, mpaddr);
    1.70 +    if (!pmd_present(*pmd))
    1.71 +        goto not_present;
    1.72 +
    1.73 +    return pte_offset_map(pmd, mpaddr);
    1.74 +
    1.75 +not_present:
    1.76 +    return NULL;
    1.77 +}
    1.78 +
    1.79 +#ifdef CONFIG_XEN_IA64_DOM0_VP
    1.80 +static pte_t*
    1.81 +lookup_noalloc_domain_pte_none(struct domain* d, unsigned long mpaddr)
    1.82 +{
    1.83 +    struct mm_struct *mm = d->arch.mm;
    1.84 +    pgd_t *pgd;
    1.85 +    pud_t *pud;
    1.86 +    pmd_t *pmd;
    1.87 +
    1.88 +    BUG_ON(mm->pgd == NULL);
    1.89 +    pgd = pgd_offset(mm, mpaddr);
    1.90 +    if (pgd_none(*pgd))
    1.91 +        goto not_present;
    1.92 +
    1.93 +    pud = pud_offset(pgd, mpaddr);
    1.94 +    if (pud_none(*pud))
    1.95 +        goto not_present;
    1.96 +
    1.97 +    pmd = pmd_offset(pud, mpaddr);
    1.98 +    if (pmd_none(*pmd))
    1.99 +        goto not_present;
   1.100 +
   1.101 +    return pte_offset_map(pmd, mpaddr);
   1.102 +
   1.103 +not_present:
   1.104 +    return NULL;
   1.105 +}
   1.106 +#endif
   1.107  
   1.108  /* Allocate a new page for domain and map it to the specified metaphysical 
   1.109     address.  */
   1.110 -static struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr)
   1.111 +struct page_info *
   1.112 +__assign_new_domain_page(struct domain *d, unsigned long mpaddr, pte_t* pte)
   1.113  {
   1.114 -	unsigned long maddr;
   1.115 -	struct page_info *p;
   1.116 +    struct page_info *p = NULL;
   1.117 +    unsigned long maddr;
   1.118 +
   1.119 +    BUG_ON(!pte_none(*pte));
   1.120  
   1.121  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
   1.122 -	if (d == dom0) {
   1.123 -		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   1.124 -			/* FIXME: is it true ?
   1.125 -			   dom0 memory is not contiguous!  */
   1.126 -			printk("assign_new_domain_page: bad domain0 "
   1.127 -			       "mpaddr=%lx, start=%lx, end=%lx!\n",
   1.128 -			       mpaddr, dom0_start, dom0_start+dom0_size);
   1.129 -			while(1);
   1.130 -		}
   1.131 -		p = mfn_to_page((mpaddr >> PAGE_SHIFT));
   1.132 -	}
   1.133 -	else
   1.134 +    if (d == dom0) {
   1.135 +#if 0
   1.136 +        if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
   1.137 +            /* FIXME: is it true ?
   1.138 +               dom0 memory is not contiguous!  */
   1.139 +            panic("assign_new_domain_page: bad domain0 "
   1.140 +                  "mpaddr=%lx, start=%lx, end=%lx!\n",
   1.141 +                  mpaddr, dom0_start, dom0_start+dom0_size);
   1.142 +        }
   1.143  #endif
   1.144 -	{
   1.145 -		p = alloc_domheap_page(d);
   1.146 -		// zero out pages for security reasons
   1.147 -		if (p) memset(__va(page_to_maddr(p)),0,PAGE_SIZE);
   1.148 -	}
   1.149 -	if (unlikely(!p)) {
   1.150 -		printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
   1.151 -		return(p);
   1.152 -	}
   1.153 -	maddr = page_to_maddr (p);
   1.154 -	if (unlikely(maddr > __get_cpu_var(vhpt_paddr)
   1.155 -		     && maddr < __get_cpu_var(vhpt_pend))) {
   1.156 -		/* FIXME: how can this happen ?
   1.157 -		   vhpt is allocated by alloc_domheap_page.  */
   1.158 -		printf("assign_new_domain_page: reassigned vhpt page %lx!!\n",
   1.159 -		       maddr);
   1.160 -	}
   1.161 -	assign_domain_page (d, mpaddr, maddr);
   1.162 -	return p;
   1.163 +        p = mfn_to_page((mpaddr >> PAGE_SHIFT));
   1.164 +        return p;
   1.165 +    }
   1.166 +    else
   1.167 +#endif
   1.168 +    {
   1.169 +        p = alloc_domheap_page(d);
   1.170 +        // zero out pages for security reasons
   1.171 +        if (p)
   1.172 +            clear_page(page_to_virt(p));
   1.173 +    }
   1.174 +    if (unlikely(!p)) {
   1.175 +        printf("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
   1.176 +        return(p);
   1.177 +    }
   1.178 +    maddr = page_to_maddr (p);
   1.179 +    if (unlikely(maddr > __get_cpu_var(vhpt_paddr)
   1.180 +                 && maddr < __get_cpu_var(vhpt_pend))) {
   1.181 +        /* FIXME: how can this happen ?
   1.182 +           vhpt is allocated by alloc_domheap_page.  */
   1.183 +        printf("assign_new_domain_page: reassigned vhpt page %lx!!\n",
   1.184 +               maddr);
   1.185 +    }
   1.186 +
   1.187 +    set_pte(pte, pfn_pte(maddr >> PAGE_SHIFT,
   1.188 +                         __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
   1.189 +
   1.190 +    //XXX CONFIG_XEN_IA64_DOM0_VP
   1.191 +    //    TODO racy
   1.192 +    if ((mpaddr & GPFN_IO_MASK) == GPFN_MEM)
   1.193 +        set_gpfn_from_mfn(page_to_mfn(p), mpaddr >> PAGE_SHIFT);
   1.194 +    return p;
   1.195 +}
   1.196 +
   1.197 +struct page_info *
   1.198 +assign_new_domain_page(struct domain *d, unsigned long mpaddr)
   1.199 +{
   1.200 +#ifdef CONFIG_DOMAIN0_CONTIGUOUS
   1.201 +    pte_t dummy_pte = __pte(0);
   1.202 +    return __assign_new_domain_page(d, mpaddr, &dummy_pte);
   1.203 +#else
   1.204 +    struct page_info *p = NULL;
   1.205 +    pte_t *pte;
   1.206 +
   1.207 +    pte = lookup_alloc_domain_pte(d, mpaddr);
   1.208 +    if (pte_none(*pte)) {
   1.209 +        p = __assign_new_domain_page(d, mpaddr, pte);
   1.210 +    } else {
   1.211 +        DPRINTK("%s: d 0x%p mpaddr %lx already mapped!\n",
   1.212 +                __func__, d, mpaddr);
   1.213 +    }
   1.214 +
   1.215 +    return p;
   1.216 +#endif
   1.217 +}
   1.218 +
   1.219 +void
   1.220 +assign_new_domain0_page(struct domain *d, unsigned long mpaddr)
   1.221 +{
   1.222 +#ifndef CONFIG_DOMAIN0_CONTIGUOUS
   1.223 +    pte_t *pte;
   1.224 +
   1.225 +    BUG_ON(d != dom0);
   1.226 +    pte = lookup_alloc_domain_pte(d, mpaddr);
   1.227 +    if (pte_none(*pte)) {
   1.228 +        struct page_info *p = __assign_new_domain_page(d, mpaddr, pte);
   1.229 +        if (p == NULL) {
   1.230 +            panic("%s: can't allocate page for dom0", __func__);
   1.231 +        }
   1.232 +    }
   1.233 +#endif
   1.234  }
   1.235  
   1.236  /* map a physical address to the specified metaphysical addr */
   1.237  void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr)
   1.238  {
   1.239 -	struct mm_struct *mm = d->arch.mm;
   1.240 -	struct page_info *pt;
   1.241 -	pgd_t *pgd;
   1.242 -	pud_t *pud;
   1.243 -	pmd_t *pmd;
   1.244  	pte_t *pte;
   1.245  
   1.246 -	if (!mm->pgd) {
   1.247 -		printk("assign_domain_page: domain pgd must exist!\n");
   1.248 -		return;
   1.249 -	}
   1.250 -	pgd = pgd_offset(mm,mpaddr);
   1.251 -	if (pgd_none(*pgd))
   1.252 -	{
   1.253 -		pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
   1.254 -		pt = maddr_to_page(pgd_val(*pgd));
   1.255 -		list_add_tail(&pt->list, &d->arch.mm->pt_list);
   1.256 -	}
   1.257 -
   1.258 -	pud = pud_offset(pgd, mpaddr);
   1.259 -	if (pud_none(*pud))
   1.260 -	{
   1.261 -		pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
   1.262 -		pt = maddr_to_page(pud_val(*pud));
   1.263 -		list_add_tail(&pt->list, &d->arch.mm->pt_list);
   1.264 -	}
   1.265 -
   1.266 -	pmd = pmd_offset(pud, mpaddr);
   1.267 -	if (pmd_none(*pmd))
   1.268 -	{
   1.269 -		pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
   1.270 -//		pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
   1.271 -		pt = maddr_to_page(pmd_val(*pmd));
   1.272 -		list_add_tail(&pt->list, &d->arch.mm->pt_list);
   1.273 -	}
   1.274 -
   1.275 -	pte = pte_offset_map(pmd, mpaddr);
   1.276 +	pte = lookup_alloc_domain_pte(d, mpaddr);
   1.277  	if (pte_none(*pte)) {
   1.278  		set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
   1.279  			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
   1.280 +
   1.281 +	//XXX CONFIG_XEN_IA64_DOM0_VP
   1.282 +	//    TODO racy
   1.283 +	if ((mpaddr & GPFN_IO_MASK) == GPFN_MEM)
   1.284 +		set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
   1.285  	}
   1.286  	else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
   1.287 -    if((physaddr>>PAGE_SHIFT)<max_page){
   1.288 -        *(mpt_table + (physaddr>>PAGE_SHIFT))=(mpaddr>>PAGE_SHIFT);
   1.289 +}
   1.290 +
   1.291 +#ifdef CONFIG_XEN_IA64_DOM0_VP
   1.292 +static void
   1.293 +assign_domain_same_page(struct domain *d,
   1.294 +                          unsigned long mpaddr, unsigned long size)
   1.295 +{
    1.296 +    //XXX optimization: maps one page at a time
   1.297 +    unsigned long end = mpaddr + size;
   1.298 +    for (; mpaddr < end; mpaddr += PAGE_SIZE) {
   1.299 +        assign_domain_page(d, mpaddr, mpaddr);
   1.300      }
   1.301  }
   1.302  
   1.303 +unsigned long
   1.304 +assign_domain_mmio_page(struct domain *d,
   1.305 +                        unsigned long mpaddr, unsigned long size)
   1.306 +{
   1.307 +    if (size == 0) {
   1.308 +        DPRINTK("%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
   1.309 +                __func__, d, mpaddr, size);
   1.310 +    }
   1.311 +    assign_domain_same_page(d, mpaddr, size);
   1.312 +    return mpaddr;
   1.313 +}
   1.314 +
   1.315 +unsigned long
   1.316 +assign_domain_mach_page(struct domain *d,
   1.317 +                        unsigned long mpaddr, unsigned long size)
   1.318 +{
   1.319 +    assign_domain_same_page(d, mpaddr, size);
   1.320 +    return mpaddr;
   1.321 +}
   1.322 +
    1.323 +//XXX sledge hammer.
    1.324 +//    flush finer range.
   1.325 +void
   1.326 +domain_page_flush(struct domain* d, unsigned long mpaddr,
   1.327 +                  unsigned long old_mfn, unsigned long new_mfn)
   1.328 +{
   1.329 +    struct vcpu* v;
   1.330 +    //XXX SMP
   1.331 +    for_each_vcpu(d, v) {
   1.332 +        vcpu_purge_tr_entry(&v->arch.dtlb);
   1.333 +        vcpu_purge_tr_entry(&v->arch.itlb);
   1.334 +    }
   1.335 +
   1.336 +    // flush vhpt
   1.337 +    vhpt_flush();
   1.338 +    // flush tlb
   1.339 +    flush_tlb_all();
   1.340 +}
   1.341 +
   1.342 +static void
   1.343 +zap_domain_page_one(struct domain *d, unsigned long mpaddr)
   1.344 +{
   1.345 +    struct mm_struct *mm = d->arch.mm;
   1.346 +    pte_t *pte;
   1.347 +    pte_t old_pte;
   1.348 +    unsigned long mfn;
   1.349 +    struct page_info *page;
   1.350 +
   1.351 +    pte = lookup_noalloc_domain_pte_none(d, mpaddr);
   1.352 +    if (pte == NULL)
   1.353 +        return;
   1.354 +    if (pte_none(*pte))
   1.355 +        return;
   1.356 +
   1.357 +    // update pte
   1.358 +    old_pte = ptep_get_and_clear(mm, mpaddr, pte);
   1.359 +    mfn = pte_pfn(old_pte);
   1.360 +    page = mfn_to_page(mfn);
   1.361 +
   1.362 +    if (page_get_owner(page) == d) {
   1.363 +        BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
   1.364 +        set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
   1.365 +    }
   1.366 +
   1.367 +    domain_page_flush(d, mpaddr, mfn, INVALID_MFN);
   1.368 +
   1.369 +    put_page(page);
   1.370 +}
   1.371 +#endif
   1.372 +
   1.373  void build_physmap_table(struct domain *d)
   1.374  {
   1.375  	struct list_head *list_ent = d->page_list.next;
   1.376 @@ -620,12 +814,42 @@ void mpafoo(unsigned long mpaddr)
   1.377  		privop_trace = 1;
   1.378  }
   1.379  
   1.380 +#ifdef CONFIG_XEN_IA64_DOM0_VP
   1.381 +unsigned long
   1.382 +____lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
   1.383 +{
   1.384 +    pte_t *pte;
   1.385 +
   1.386 +    pte = lookup_noalloc_domain_pte(d, mpaddr);
   1.387 +    if (pte == NULL)
   1.388 +        goto not_present;
   1.389 +
   1.390 +    if (pte_present(*pte))
   1.391 +        return (pte->pte & _PFN_MASK);
   1.392 +    else if (VMX_DOMAIN(d->vcpu[0]))
   1.393 +        return GPFN_INV_MASK;
   1.394 +
   1.395 +not_present:
   1.396 +    return INVALID_MFN;
   1.397 +}
   1.398 +
   1.399 +unsigned long
   1.400 +__lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
   1.401 +{
   1.402 +    unsigned long machine = ____lookup_domain_mpa(d, mpaddr);
   1.403 +    if (machine != INVALID_MFN)
   1.404 +        return machine;
   1.405 +
   1.406 +    printk("%s: d 0x%p id %d current 0x%p id %d\n",
   1.407 +           __func__, d, d->domain_id, current, current->vcpu_id);
   1.408 +    printk("%s: bad mpa 0x%lx (max_pages 0x%lx)\n",
   1.409 +           __func__, mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
   1.410 +    return INVALID_MFN;
   1.411 +}
   1.412 +#endif
   1.413 +
   1.414  unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
   1.415  {
   1.416 -	struct mm_struct *mm = d->arch.mm;
   1.417 -	pgd_t *pgd = pgd_offset(mm, mpaddr);
   1.418 -	pud_t *pud;
   1.419 -	pmd_t *pmd;
   1.420  	pte_t *pte;
   1.421  
   1.422  #ifdef CONFIG_DOMAIN0_CONTIGUOUS
   1.423 @@ -642,26 +866,23 @@ unsigned long lookup_domain_mpa(struct d
   1.424  		return *(unsigned long *)pte;
   1.425  	}
   1.426  #endif
   1.427 -	if (pgd_present(*pgd)) {
   1.428 -		pud = pud_offset(pgd,mpaddr);
   1.429 -		if (pud_present(*pud)) {
   1.430 -			pmd = pmd_offset(pud,mpaddr);
   1.431 -			if (pmd_present(*pmd)) {
   1.432 -				pte = pte_offset_map(pmd,mpaddr);
   1.433 -				if (pte_present(*pte)) {
   1.434 +	pte = lookup_noalloc_domain_pte(d, mpaddr);
   1.435 +	if (pte != NULL) {
   1.436 +		if (pte_present(*pte)) {
   1.437  //printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
   1.438 -					return *(unsigned long *)pte;
   1.439 -				} else if (VMX_DOMAIN(d->vcpu[0]))
   1.440 -					return GPFN_INV_MASK;
   1.441 -			}
   1.442 -		}
   1.443 +			return *(unsigned long *)pte;
   1.444 +		} else if (VMX_DOMAIN(d->vcpu[0]))
   1.445 +			return GPFN_INV_MASK;
   1.446  	}
   1.447 -	if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
   1.448 -		printk("lookup_domain_mpa: non-allocated mpa 0x%lx (< 0x%lx)\n",
   1.449 -			mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
   1.450 -	} else
   1.451 -		printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
   1.452 -			mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
   1.453 +
   1.454 +	printk("%s: d 0x%p id %d current 0x%p id %d\n",
   1.455 +	       __func__, d, d->domain_id, current, current->vcpu_id);
   1.456 +	if ((mpaddr >> PAGE_SHIFT) < d->max_pages)
   1.457 +		printk("%s: non-allocated mpa 0x%lx (< 0x%lx)\n", __func__,
   1.458 +		       mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
   1.459 +	else
    1.460 +		printk("%s: bad mpa 0x%lx (>= 0x%lx)\n", __func__,
   1.461 +		       mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
   1.462  	mpafoo(mpaddr);
   1.463  	return 0;
   1.464  }
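
The three lookup helpers added above perform the same
pgd -> pud -> pmd -> pte walk and differ only in policy: whether a
missing level is allocated (lookup_alloc_domain_pte) and whether an
entry is tested with xxx_none() or !xxx_present() (see the XXX comment).
A self-contained toy model of that walk, in user-space C with made-up
table geometry (nothing below is code from this patch), for readers
unfamiliar with the Linux-style page-table accessors:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy geometry: 4-bit index per level, 12-bit page offset. */
    #define LVL_BITS  4
    #define LVL_SIZE  (1 << LVL_BITS)
    #define PAGE_BITS 12

    typedef struct { void *slot[LVL_SIZE]; } dir_t;          /* pgd/pud/pmd */
    typedef struct { unsigned long pte[LVL_SIZE]; } leaf_t;  /* pte page */

    /* alloc != 0 behaves like lookup_alloc_domain_pte(): missing levels
     * are created.  alloc == 0 behaves like lookup_noalloc_domain_pte():
     * the walk stops with NULL at the first empty level.
     * (Error handling for calloc() omitted in this toy.) */
    static unsigned long *
    lookup(dir_t *pgd, unsigned long addr, int alloc)
    {
        void *t = pgd;
        for (int level = 0; level < 3; level++) {            /* pgd, pud, pmd */
            unsigned i = (addr >> (PAGE_BITS + LVL_BITS * (3 - level)))
                         & (LVL_SIZE - 1);
            void **slot = &((dir_t *)t)->slot[i];
            if (*slot == NULL) {
                if (!alloc)
                    return NULL;
                *slot = calloc(1, level == 2 ? sizeof(leaf_t) : sizeof(dir_t));
            }
            t = *slot;
        }
        return &((leaf_t *)t)->pte[(addr >> PAGE_BITS) & (LVL_SIZE - 1)];
    }

    int main(void)
    {
        dir_t pgd = {{0}};
        unsigned long mpaddr = 0x123456;

        printf("noalloc before: %p\n", (void *)lookup(&pgd, mpaddr, 0));
        *lookup(&pgd, mpaddr, 1) = 0xabc000;                 /* "set_pte" */
        printf("noalloc after:  pte=0x%lx\n", *lookup(&pgd, mpaddr, 0));
        return 0;
    }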
     2.1 --- a/xen/arch/ia64/xen/hyperprivop.S	Tue Apr 25 10:54:45 2006 -0700
     2.2 +++ b/xen/arch/ia64/xen/hyperprivop.S	Tue Apr 25 13:06:57 2006 -0600
     2.3 @@ -26,9 +26,20 @@
     2.4  #define FAST_REFLECT_CNT
     2.5  //#define FAST_TICK // mostly working (unat problems) but default off for now
     2.6  //#define FAST_TLB_MISS_REFLECT	// mostly working but default off for now
     2.7 +#ifdef CONFIG_XEN_IA64_DOM0_VP
     2.8 +#undef FAST_ITC	//XXX CONFIG_XEN_IA64_DOM0_VP
     2.9 +		//    TODO fast_itc doesn't support dom0 vp yet.
    2.10 +#else
    2.11  //#define FAST_ITC	// working but default off for now
    2.12 +#endif
    2.13  #define FAST_BREAK
    2.14 -#define FAST_ACCESS_REFLECT
    2.15 +#ifndef CONFIG_XEN_IA64_DOM0_VP
    2.16 +# define FAST_ACCESS_REFLECT
    2.17 +#else
    2.18 +# undef FAST_ACCESS_REFLECT //XXX CONFIG_XEN_IA64_DOM0_VP
    2.19 +                            //    TODO fast_access_reflect
    2.20 +                            //    doesn't support dom0 vp yet.
    2.21 +#endif
    2.22  #define FAST_RFI
    2.23  #define FAST_SSM_I
    2.24  #define FAST_PTC_GA
     3.1 --- a/xen/arch/ia64/xen/process.c	Tue Apr 25 10:54:45 2006 -0700
     3.2 +++ b/xen/arch/ia64/xen/process.c	Tue Apr 25 13:06:57 2006 -0600
     3.3 @@ -81,18 +81,25 @@ void tdpfoo(void) { }
     3.4  // address, convert the pte for a physical address for (possibly different)
     3.5  // Xen PAGE_SIZE and return modified pte.  (NOTE: TLB insert should use
     3.6  // PAGE_SIZE!)
     3.7 -unsigned long translate_domain_pte(unsigned long pteval,
     3.8 -	unsigned long address, unsigned long itir)
     3.9 +u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps)
    3.10  {
    3.11  	struct domain *d = current->domain;
    3.12 -	unsigned long mask, pteval2, mpaddr;
    3.13 +	ia64_itir_t itir = {.itir = itir__};
    3.14 +	u64 mask, mpaddr, pteval2;
    3.15  
    3.16  	pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
    3.17  
    3.18  	// FIXME address had better be pre-validated on insert
    3.19 -	mask = ~itir_mask(itir);
    3.20 +	mask = ~itir_mask(itir.itir);
    3.21  	mpaddr = (((pteval & ~_PAGE_ED) & _PAGE_PPN_MASK) & ~mask) |
    3.22  	         (address & mask);
    3.23 +#ifdef CONFIG_XEN_IA64_DOM0_VP
    3.24 +	if (itir.ps > PAGE_SHIFT) {
    3.25 +		itir.ps = PAGE_SHIFT;
    3.26 +	}
    3.27 +#endif
    3.28 +	*logps = itir.ps;
    3.29 +#ifndef CONFIG_XEN_IA64_DOM0_VP
    3.30  	if (d == dom0) {
    3.31  		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
    3.32  			/*
    3.33 @@ -112,9 +119,10 @@ unsigned long translate_domain_pte(unsig
    3.34  			printf("translate_domain_pte: bad mpa=0x%lx (> 0x%lx),"
    3.35  			       "vadr=0x%lx,pteval=0x%lx,itir=0x%lx\n",
    3.36  			       mpaddr, (unsigned long)d->max_pages<<PAGE_SHIFT,
    3.37 -			       address, pteval, itir);
    3.38 +			       address, pteval, itir.itir);
    3.39  		tdpfoo();
    3.40  	}
    3.41 +#endif
    3.42  	pteval2 = lookup_domain_mpa(d,mpaddr);
    3.43  	pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
    3.44  	pteval2 |= (pteval & _PAGE_ED);
    3.45 @@ -128,6 +136,7 @@ unsigned long translate_domain_mpaddr(un
    3.46  {
    3.47  	unsigned long pteval;
    3.48  
    3.49 +#ifndef CONFIG_XEN_IA64_DOM0_VP
    3.50  	if (current->domain == dom0) {
    3.51  		if (mpaddr < dom0_start || mpaddr >= dom0_start + dom0_size) {
    3.52  			printk("translate_domain_mpaddr: out-of-bounds dom0 mpaddr 0x%lx! continuing...\n",
    3.53 @@ -135,6 +144,7 @@ unsigned long translate_domain_mpaddr(un
    3.54  			tdpfoo();
    3.55  		}
    3.56  	}
    3.57 +#endif
    3.58  	pteval = lookup_domain_mpa(current->domain,mpaddr);
    3.59  	return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
    3.60  }
    3.61 @@ -294,8 +304,9 @@ void ia64_do_page_fault (unsigned long a
    3.62   again:
    3.63  	fault = vcpu_translate(current,address,is_data,0,&pteval,&itir,&iha);
    3.64  	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
    3.65 -		pteval = translate_domain_pte(pteval,address,itir);
    3.66 -		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,(itir>>2)&0x3f);
    3.67 +		u64 logps;
    3.68 +		pteval = translate_domain_pte(pteval, address, itir, &logps);
    3.69 +		vcpu_itc_no_srlz(current,is_data?2:1,address,pteval,-1UL,logps);
    3.70  		if (fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) {
    3.71  			/* dtlb has been purged in-between.  This dtlb was
    3.72  			   matching.  Undo the work.  */
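
The behavioural change in translate_domain_pte() is that a guest
translation larger than Xen's page size is no longer inserted at the
guest's size: under CONFIG_XEN_IA64_DOM0_VP, itir.ps is clamped to
PAGE_SHIFT, and the resulting log2 page size is returned through *logps
for the TLB insert.  A worked example of the address arithmetic
(standalone C; the PAGE_SHIFT value and the itir_mask() definition are
assumptions based on Xen's ia64 headers, not part of this patch):

    #include <stdio.h>

    #define PAGE_SHIFT 14                  /* assumed: 16KB Xen pages */
    #define itir_ps(itir)   (((itir) >> 2) & 0x3fUL)
    #define itir_mask(itir) (~((1UL << itir_ps(itir)) - 1))  /* assumed */

    int main(void)
    {
        unsigned long itir  = 24UL << 2;   /* guest inserts a 16MB page */
        unsigned long vaddr = 0x40012345UL;
        unsigned long ppn   = 0x3000000UL; /* page base taken from the pte */

        /* same arithmetic as translate_domain_pte() */
        unsigned long mask   = ~itir_mask(itir);
        unsigned long mpaddr = (ppn & ~mask) | (vaddr & mask);
        unsigned long logps  = itir_ps(itir) > PAGE_SHIFT
                               ? PAGE_SHIFT : itir_ps(itir);

        /* prints mpaddr=0x3012345 logps=14: the metaphysical address keeps
           the guest's offset within the 16MB page, while the TLB insert is
           downgraded to Xen's page size */
        printf("mpaddr=0x%lx logps=%lu\n", mpaddr, logps);
        return 0;
    }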
     4.1 --- a/xen/arch/ia64/xen/vcpu.c	Tue Apr 25 10:54:45 2006 -0700
     4.2 +++ b/xen/arch/ia64/xen/vcpu.c	Tue Apr 25 13:06:57 2006 -0600
     4.3 @@ -25,7 +25,6 @@ extern void setreg(unsigned long regnum,
     4.4  extern void getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs);
     4.5  
     4.6  extern void panic_domain(struct pt_regs *, const char *, ...);
     4.7 -extern unsigned long translate_domain_pte(UINT64,UINT64,UINT64);
     4.8  extern unsigned long translate_domain_mpaddr(unsigned long);
     4.9  extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);
    4.10  
    4.11 @@ -1276,6 +1275,7 @@ static inline int vcpu_match_tr_entry(TR
    4.12  	return trp->pte.p && vcpu_match_tr_entry_no_p(trp, ifa, rid);
    4.13  }
    4.14  
    4.15 +// in_tpa is not used when CONFIG_XEN_IA64_DOM0_VP is defined
    4.16  IA64FAULT vcpu_translate(VCPU *vcpu, UINT64 address, BOOLEAN is_data, BOOLEAN in_tpa, UINT64 *pteval, UINT64 *itir, UINT64 *iha)
    4.17  {
    4.18  	unsigned long region = address >> 61;
    4.19 @@ -1353,8 +1353,12 @@ IA64FAULT vcpu_translate(VCPU *vcpu, UIN
    4.20  	pte = trp->pte;
    4.21  	if (/* is_data && */ pte.p
    4.22  	    && vcpu_match_tr_entry_no_p(trp,address,rid)) {
    4.23 -		if (vcpu->domain==dom0 && !in_tpa) *pteval = pte.val;
    4.24 -		else *pteval = vcpu->arch.dtlb_pte;
    4.25 +#ifndef CONFIG_XEN_IA64_DOM0_VP
    4.26 +		if (vcpu->domain==dom0 && !in_tpa)
    4.27 +			*pteval = pte.val;
    4.28 +		else
    4.29 +#endif
    4.30 +		*pteval = vcpu->arch.dtlb_pte;
    4.31  		*itir = trp->itir;
    4.32  		dtlb_translate_count++;
    4.33  		return IA64_USE_TLB;
    4.34 @@ -1689,7 +1693,7 @@ IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT6
    4.35   VCPU translation register access routines
    4.36  **************************************************************************/
    4.37  
    4.38 -static inline void vcpu_purge_tr_entry(TR_ENTRY *trp)
    4.39 +void vcpu_purge_tr_entry(TR_ENTRY *trp)
    4.40  {
    4.41  	trp->pte.val = 0;
    4.42  }
    4.43 @@ -1758,6 +1762,9 @@ void vcpu_itc_no_srlz(VCPU *vcpu, UINT64
    4.44  		//FIXME: kill domain here
    4.45  		while(1);
    4.46  	}
    4.47 +#ifdef CONFIG_XEN_IA64_DOM0_VP
    4.48 +	BUG_ON(logps > PAGE_SHIFT);
    4.49 +#endif
    4.50  	psr = ia64_clear_ic();
    4.51  	ia64_itc(IorD,vaddr,pte,ps); // FIXME: look for bigger mappings
    4.52  	ia64_set_psr(psr);
    4.53 @@ -1798,7 +1805,7 @@ IA64FAULT vcpu_itc_d(VCPU *vcpu, UINT64 
    4.54  		while(1);
    4.55  	}
    4.56  	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
    4.57 -	pteval = translate_domain_pte(pte,ifa,itir);
    4.58 +	pteval = translate_domain_pte(pte, ifa, itir, &logps);
    4.59  	if (!pteval) return IA64_ILLOP_FAULT;
    4.60  	if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
    4.61  	vcpu_itc_no_srlz(vcpu,2,ifa,pteval,pte,logps);
    4.62 @@ -1818,7 +1825,7 @@ IA64FAULT vcpu_itc_i(VCPU *vcpu, UINT64 
    4.63  		while(1);
    4.64  	}
    4.65  	//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
    4.66 -	pteval = translate_domain_pte(pte,ifa,itir);
    4.67 +	pteval = translate_domain_pte(pte, ifa, itir, &logps);
    4.68  	// FIXME: what to do if bad physical address? (machine check?)
    4.69  	if (!pteval) return IA64_ILLOP_FAULT;
    4.70  	if (swap_rr0) set_one_rr(0x0,PSCB(vcpu,rrs[0]));
     5.1 --- a/xen/arch/ia64/xen/xenmisc.c	Tue Apr 25 10:54:45 2006 -0700
     5.2 +++ b/xen/arch/ia64/xen/xenmisc.c	Tue Apr 25 13:06:57 2006 -0600
     5.3 @@ -87,9 +87,12 @@ void raise_actimer_softirq(void)
     5.4  unsigned long
     5.5  gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
     5.6  {
     5.7 +#ifndef CONFIG_XEN_IA64_DOM0_VP
     5.8  	if (d == dom0)
     5.9  		return(gpfn);
    5.10 -	else {
    5.11 +	else
    5.12 +#endif
    5.13 +	{
    5.14  		unsigned long pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT);
    5.15  		if (!pte) {
    5.16  printk("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
     6.1 --- a/xen/include/asm-ia64/domain.h	Tue Apr 25 10:54:45 2006 -0700
     6.2 +++ b/xen/include/asm-ia64/domain.h	Tue Apr 25 13:06:57 2006 -0600
     6.3 @@ -162,6 +162,15 @@ struct mm_struct {
     6.4  
     6.5  extern struct mm_struct init_mm;
     6.6  
     6.7 +struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
     6.8 +void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
     6.9 +void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
    6.10 +void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
    6.11 +#ifdef CONFIG_XEN_IA64_DOM0_VP
    6.12 +unsigned long assign_domain_mmio_page(struct domain *d, unsigned long mpaddr, unsigned long size);
    6.13 +unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size);
    6.14 +#endif
    6.15 +
    6.16  #include <asm/uaccess.h> /* for KERNEL_DS */
    6.17  #include <asm/pgtable.h>
    6.18  
     7.1 --- a/xen/include/asm-ia64/mm.h	Tue Apr 25 10:54:45 2006 -0700
     7.2 +++ b/xen/include/asm-ia64/mm.h	Tue Apr 25 13:06:57 2006 -0600
     7.3 @@ -415,8 +415,12 @@ extern int nr_swap_pages;
     7.4  
     7.5  extern unsigned long *mpt_table;
     7.6  extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
     7.7 +extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps);
     7.8  extern unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
     7.9 -#undef machine_to_phys_mapping
    7.10 +#ifdef CONFIG_XEN_IA64_DOM0_VP
    7.11 +extern unsigned long __lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
    7.12 +extern unsigned long ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr);
    7.13 +#endif
    7.14  #define machine_to_phys_mapping	mpt_table
    7.15  
    7.16  #define INVALID_M2P_ENTRY        (~0UL)
     8.1 --- a/xen/include/asm-ia64/vcpu.h	Tue Apr 25 10:54:45 2006 -0700
     8.2 +++ b/xen/include/asm-ia64/vcpu.h	Tue Apr 25 13:06:57 2006 -0600
     8.3 @@ -133,6 +133,7 @@ extern IA64FAULT vcpu_get_pkr(VCPU *vcpu
     8.4  extern IA64FAULT vcpu_set_pkr(VCPU *vcpu, UINT64 reg, UINT64 val);
     8.5  extern IA64FAULT vcpu_tak(VCPU *vcpu, UINT64 vadr, UINT64 *key);
     8.6  /* TLB */
     8.7 +extern void vcpu_purge_tr_entry(TR_ENTRY *trp);
     8.8  extern IA64FAULT vcpu_itr_d(VCPU *vcpu, UINT64 slot, UINT64 padr,
     8.9  		UINT64 itir, UINT64 ifa);
    8.10  extern IA64FAULT vcpu_itr_i(VCPU *vcpu, UINT64 slot, UINT64 padr,
     9.1 --- a/xen/include/asm-ia64/xenprocessor.h	Tue Apr 25 10:54:45 2006 -0700
     9.2 +++ b/xen/include/asm-ia64/xenprocessor.h	Tue Apr 25 13:06:57 2006 -0600
     9.3 @@ -221,4 +221,20 @@ typedef union {
     9.4  
     9.5  DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
     9.6  
     9.7 +typedef union {
     9.8 +    struct {
     9.9 +        u64 rv3  :  2; // 0-1
    9.10 +        u64 ps   :  6; // 2-7
    9.11 +        u64 key  : 24; // 8-31
    9.12 +        u64 rv4  : 32; // 32-63
    9.13 +    };
    9.14 +    struct {
    9.15 +        u64 __rv3  : 32; // 0-31
    9.16 +        // next extension to rv4
    9.17 +        u64 rid  : 24;  // 32-55
    9.18 +        u64 __rv4  : 8; // 56-63
    9.19 +    };
    9.20 +    u64 itir;
    9.21 +} ia64_itir_t;
    9.22 +
    9.23  #endif // _ASM_IA64_XENPROCESSOR_H
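
For reference, the new ia64_itir_t overlays the architectural ITIR
layout (ps in bits 2-7, key in bits 8-31) with a second view that names
a 24-bit rid field in the upper half.  A standalone demo of decoding a
raw value through the union (user-space C11 for the anonymous struct
members; only the union itself comes from this patch):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    /* copy of the union introduced above */
    typedef union {
        struct {
            u64 rv3  :  2; // 0-1
            u64 ps   :  6; // 2-7
            u64 key  : 24; // 8-31
            u64 rv4  : 32; // 32-63
        };
        struct {
            u64 __rv3 : 32; // 0-31
            u64 rid   : 24; // 32-55
            u64 __rv4 :  8; // 56-63
        };
        u64 itir;
    } ia64_itir_t;

    int main(void)
    {
        /* ps = 24 (16MB page), key = 0x123456 */
        ia64_itir_t itir = { .itir = (24UL << 2) | (0x123456UL << 8) };
        printf("ps=%u (page size 0x%lx), key=0x%lx\n",
               (unsigned)itir.ps, 1UL << itir.ps, (unsigned long)itir.key);
        return 0;
    }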