ia64/xen-unstable

changeset 10152:40959bc0a269

[IA64] GNTMAP_readonly support xen part

Add grant-table GNTMAP_readonly support.
Introduce an ASSIGN_readonly flag for read-only page assignment to the
pseudo-physical address space.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Wed May 24 10:56:47 2006 -0600 (2006-05-24)
parents 2cab08ac143b
children 5674e4fe8f02
files xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/process.c xen/include/asm-ia64/domain.h xen/include/public/arch-ia64.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Wed May 24 10:39:55 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Wed May 24 10:56:47 2006 -0600
     1.3 @@ -346,7 +346,7 @@ int vmx_build_physmap_table(struct domai
     1.4  	    for (j = io_ranges[i].start;
     1.5  		 j < io_ranges[i].start + io_ranges[i].size;
     1.6  		 j += PAGE_SIZE)
     1.7 -		__assign_domain_page(d, j, io_ranges[i].type);
     1.8 +		__assign_domain_page(d, j, io_ranges[i].type, ASSIGN_writable);
     1.9  	}
    1.10  
    1.11  	/* Map normal memory below 3G */
     2.1 --- a/xen/arch/ia64/xen/domain.c	Wed May 24 10:39:55 2006 -0600
     2.2 +++ b/xen/arch/ia64/xen/domain.c	Wed May 24 10:56:47 2006 -0600
     2.3 @@ -834,17 +834,19 @@ assign_new_domain0_page(struct domain *d
     2.4  }
     2.5  
     2.6  /* map a physical address to the specified metaphysical addr */
     2.7 +// flags: currently only ASSIGN_readonly
     2.8  void
     2.9  __assign_domain_page(struct domain *d,
    2.10 -                     unsigned long mpaddr, unsigned long physaddr)
    2.11 +                     unsigned long mpaddr, unsigned long physaddr,
    2.12 +                     unsigned long flags)
    2.13  {
    2.14      pte_t *pte;
    2.15 +    unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
    2.16  
    2.17      pte = lookup_alloc_domain_pte(d, mpaddr);
    2.18      if (pte_none(*pte)) {
    2.19 -        set_pte(pte,
    2.20 -                pfn_pte(physaddr >> PAGE_SHIFT,
    2.21 -                        __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
    2.22 +        set_pte(pte, pfn_pte(physaddr >> PAGE_SHIFT,
    2.23 +                             __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags)));
    2.24          mb ();
    2.25      } else
    2.26          printk("%s: mpaddr %lx already mapped!\n", __func__, mpaddr);
    2.27 @@ -861,7 +863,7 @@ assign_domain_page(struct domain *d,
    2.28      BUG_ON((physaddr & GPFN_IO_MASK) != GPFN_MEM);
    2.29      ret = get_page(page, d);
    2.30      BUG_ON(ret == 0);
    2.31 -    __assign_domain_page(d, mpaddr, physaddr);
    2.32 +    __assign_domain_page(d, mpaddr, physaddr, ASSIGN_writable);
    2.33  
    2.34      //XXX CONFIG_XEN_IA64_DOM0_VP
    2.35      //    TODO racy
    2.36 @@ -871,12 +873,12 @@ assign_domain_page(struct domain *d,
    2.37  #ifdef CONFIG_XEN_IA64_DOM0_VP
    2.38  static void
    2.39  assign_domain_same_page(struct domain *d,
    2.40 -                          unsigned long mpaddr, unsigned long size)
    2.41 +                        unsigned long mpaddr, unsigned long size)
    2.42  {
    2.43      //XXX optimization
    2.44      unsigned long end = mpaddr + size;
    2.45      for (; mpaddr < end; mpaddr += PAGE_SIZE) {
    2.46 -        __assign_domain_page(d, mpaddr, mpaddr);
    2.47 +        __assign_domain_page(d, mpaddr, mpaddr, ASSIGN_writable);
    2.48      }
    2.49  }
    2.50  
    2.51 @@ -1113,15 +1115,14 @@ unsigned long lookup_domain_mpa(struct d
    2.52  		}
    2.53  		pteval = pfn_pte(mpaddr >> PAGE_SHIFT,
    2.54  			__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
    2.55 -		pte = &pteval;
    2.56 -		return *(unsigned long *)pte;
    2.57 +		return pte_val(pteval);
    2.58  	}
    2.59  #endif
    2.60  	pte = lookup_noalloc_domain_pte(d, mpaddr);
    2.61  	if (pte != NULL) {
    2.62  		if (pte_present(*pte)) {
    2.63  //printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
    2.64 -			return *(unsigned long *)pte;
    2.65 +			return pte_val(*pte);
    2.66  		} else if (VMX_DOMAIN(d->vcpu[0]))
    2.67  			return GPFN_INV_MASK;
    2.68  	}
    2.69 @@ -1135,7 +1136,10 @@ unsigned long lookup_domain_mpa(struct d
    2.70  		printk("%s: bad mpa 0x%lx (=> 0x%lx)\n", __func__,
    2.71  		       mpaddr, (unsigned long)d->max_pages << PAGE_SHIFT);
    2.72  	mpafoo(mpaddr);
    2.73 -	return 0;
    2.74 +
    2.75 +	//XXX This is a work around until the emulation memory access to a region
    2.76 +	//    where memory or device are attached is implemented.
    2.77 +	return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX)));
    2.78  }
    2.79  
    2.80  #ifdef CONFIG_XEN_IA64_DOM0_VP
    2.81 @@ -1159,19 +1163,21 @@ out:
    2.82  
    2.83  // caller must get_page(mfn_to_page(mfn)) before
    2.84  // caller must call set_gpfn_from_mfn().
    2.85 +// flags: currently only ASSIGN_readonly
    2.86  static void
    2.87  assign_domain_page_replace(struct domain *d, unsigned long mpaddr,
    2.88 -                           unsigned long mfn, unsigned int flags)
    2.89 +                           unsigned long mfn, unsigned long flags)
    2.90  {
    2.91      struct mm_struct *mm = &d->arch.mm;
    2.92      pte_t* pte;
    2.93      pte_t old_pte;
    2.94      pte_t npte;
    2.95 +    unsigned long arflags = (flags & ASSIGN_readonly)? _PAGE_AR_R: _PAGE_AR_RWX;
    2.96  
    2.97      pte = lookup_alloc_domain_pte(d, mpaddr);
    2.98  
    2.99      // update pte
   2.100 -    npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RWX));
   2.101 +    npte = pfn_pte(mfn, __pgprot(__DIRTY_BITS | _PAGE_PL_2 | arflags));
   2.102      old_pte = ptep_xchg(mm, mpaddr, pte, npte);
   2.103      if (!pte_none(old_pte)) {
   2.104          unsigned long old_mfn;
   2.105 @@ -1200,11 +1206,11 @@ assign_domain_page_replace(struct domain
   2.106  
   2.107  unsigned long
   2.108  dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn,
   2.109 -                   unsigned int flags, domid_t domid)
   2.110 +                   unsigned long flags, domid_t domid)
   2.111  {
   2.112      int error = 0;
   2.113 +    struct domain* rd;
   2.114  
   2.115 -    struct domain* rd;
   2.116      rd = find_domain_by_id(domid);
   2.117      if (unlikely(rd == NULL)) {
   2.118          switch (domid) {
   2.119 @@ -1234,7 +1240,7 @@ dom0vp_add_physmap(struct domain* d, uns
   2.120          goto out1;
   2.121      }
   2.122  
   2.123 -    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, 0/* flags:XXX */);
   2.124 +    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
   2.125      //don't update p2m table because this page belongs to rd, not d.
   2.126  out1:
   2.127      put_domain(rd);
   2.128 @@ -1254,23 +1260,18 @@ create_grant_host_mapping(unsigned long 
   2.129      struct page_info* page;
   2.130      int ret;
   2.131  
   2.132 -    if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
   2.133 +    if (flags & (GNTMAP_device_map | 
   2.134 +                 GNTMAP_application_map | GNTMAP_contains_pte)) {
   2.135          DPRINTK("%s: flags 0x%x\n", __func__, flags);
   2.136          return GNTST_general_error;
   2.137      }
   2.138 -    if (flags & GNTMAP_readonly) {
   2.139 -#if 0
   2.140 -        DPRINTK("%s: GNTMAP_readonly is not implemented yet. flags %x\n",
   2.141 -                __func__, flags);
   2.142 -#endif
   2.143 -        flags &= ~GNTMAP_readonly;
   2.144 -    }
   2.145  
   2.146      page = mfn_to_page(mfn);
   2.147      ret = get_page(page, page_get_owner(page));
   2.148      BUG_ON(ret == 0);
   2.149 -    assign_domain_page_replace(d, gpaddr, mfn, flags);
   2.150  
   2.151 +    assign_domain_page_replace(d, gpaddr, mfn, (flags & GNTMAP_readonly)?
   2.152 +                                              ASSIGN_readonly: ASSIGN_writable);
   2.153      return GNTST_okay;
   2.154  }
   2.155  
   2.156 @@ -1289,22 +1290,17 @@ destroy_grant_host_mapping(unsigned long
   2.157          DPRINTK("%s: flags 0x%x\n", __func__, flags);
   2.158          return GNTST_general_error;
   2.159      }
   2.160 -    if (flags & GNTMAP_readonly) {
   2.161 -#if 0
   2.162 -        DPRINTK("%s: GNTMAP_readonly is not implemented yet. flags %x\n",
   2.163 -                __func__, flags);
   2.164 -#endif
   2.165 -        flags &= ~GNTMAP_readonly;
   2.166 -    }
   2.167  
   2.168      pte = lookup_noalloc_domain_pte(d, gpaddr);
   2.169      if (pte == NULL || !pte_present(*pte) || pte_pfn(*pte) != mfn)
   2.170 -        return GNTST_general_error;//XXX GNTST_bad_pseudo_phys_addr
   2.171 +        return GNTST_general_error;
   2.172  
   2.173      // update pte
   2.174      old_pte = ptep_get_and_clear(&d->arch.mm, gpaddr, pte);
   2.175      if (pte_present(old_pte)) {
   2.176 -        old_mfn = pte_pfn(old_pte);//XXX
   2.177 +        old_mfn = pte_pfn(old_pte);
   2.178 +    } else {
   2.179 +        return GNTST_general_error;
   2.180      }
   2.181      domain_page_flush(d, gpaddr, old_mfn, INVALID_MFN);
   2.182  
   2.183 @@ -1405,7 +1401,7 @@ guest_physmap_add_page(struct domain *d,
   2.184  
   2.185      ret = get_page(mfn_to_page(mfn), d);
   2.186      BUG_ON(ret == 0);
   2.187 -    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, 0/* XXX */);
   2.188 +    assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, ASSIGN_writable);
   2.189      set_gpfn_from_mfn(mfn, gpfn);//XXX SMP
   2.190  
   2.191      //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT));
     3.1 --- a/xen/arch/ia64/xen/process.c	Wed May 24 10:39:55 2006 -0600
     3.2 +++ b/xen/arch/ia64/xen/process.c	Wed May 24 10:56:47 2006 -0600
     3.3 @@ -85,6 +85,8 @@ u64 translate_domain_pte(u64 pteval, u64
     3.4  	struct domain *d = current->domain;
     3.5  	ia64_itir_t itir = {.itir = itir__};
     3.6  	u64 mask, mpaddr, pteval2;
     3.7 +	u64 arflags;
     3.8 +	u64 arflags2;
     3.9  
    3.10  	pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
    3.11  
    3.12 @@ -123,6 +125,20 @@ u64 translate_domain_pte(u64 pteval, u64
    3.13  	}
    3.14  #endif
    3.15  	pteval2 = lookup_domain_mpa(d,mpaddr);
    3.16 +	arflags  = pteval  & _PAGE_AR_MASK;
    3.17 +	arflags2 = pteval2 & _PAGE_AR_MASK;
    3.18 +	if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
    3.19 +#if 0
    3.20 +		DPRINTK("%s:%d "
    3.21 +		        "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
    3.22 +		        "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
    3.23 +		        __func__, __LINE__,
    3.24 +		        pteval, arflags, address, itir__,
    3.25 +		        pteval2, arflags2, mpaddr);
    3.26 +#endif
    3.27 +		pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
     3.28 +	}
    3.29 +
    3.30  	pteval2 &= _PAGE_PPN_MASK; // ignore non-addr bits
    3.31  	pteval2 |= (pteval & _PAGE_ED);
    3.32  	pteval2 |= _PAGE_PL_2; // force PL0->2 (PL3 is unaffected)
     4.1 --- a/xen/include/asm-ia64/domain.h	Wed May 24 10:39:55 2006 -0600
     4.2 +++ b/xen/include/asm-ia64/domain.h	Wed May 24 10:56:47 2006 -0600
     4.3 @@ -114,7 +114,7 @@ struct arch_vcpu {
     4.4  
     4.5  struct page_info * assign_new_domain_page(struct domain *d, unsigned long mpaddr);
     4.6  void assign_new_domain0_page(struct domain *d, unsigned long mpaddr);
     4.7 -void __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
     4.8 +void __assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr, unsigned long flags);
     4.9  void assign_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physaddr);
    4.10  void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags);
    4.11  #ifdef CONFIG_XEN_IA64_DOM0_VP
    4.12 @@ -123,7 +123,7 @@ unsigned long assign_domain_mmio_page(st
    4.13  unsigned long assign_domain_mach_page(struct domain *d, unsigned long mpaddr, unsigned long size);
    4.14  unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
    4.15  unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
    4.16 -unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned int flags, domid_t domid);
    4.17 +unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned long flags, domid_t domid);
    4.18  #else
    4.19  #define alloc_dom_xen_and_dom_io()      do { } while (0)
    4.20  #endif
     5.1 --- a/xen/include/public/arch-ia64.h	Wed May 24 10:39:55 2006 -0600
     5.2 +++ b/xen/include/public/arch-ia64.h	Wed May 24 10:56:47 2006 -0600
     5.3 @@ -369,6 +369,10 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_conte
     5.4  #define IA64_DOM0VP_add_physmap         18      // assigne machine page frane
     5.5                                                  // to dom0's pseudo physical
     5.6                                                  // address space.
     5.7 +// flags for page assignment to pseudo physical address space
     5.8 +#define _ASSIGN_readonly                0
     5.9 +#define ASSIGN_readonly                 (1UL << _ASSIGN_readonly)
    5.10 +#define ASSIGN_writable                 (0UL << _ASSIGN_readonly) // dummy flag
    5.11  
    5.12  #endif /* !__ASSEMBLY__ */
    5.13