ia64/xen-unstable

changeset 15942:1902a21dd1ae

Merge
author Tim Deegan <Tim.Deegan@xensource.com>
date Thu Sep 20 10:13:25 2007 +0100 (2007-09-20)
parents 4c8394e3b011 b33ee2276b6a
children c2d651811741
files
line diff
     1.1 --- a/tools/ioemu/Makefile.target	Wed Sep 19 17:44:23 2007 +0100
     1.2 +++ b/tools/ioemu/Makefile.target	Thu Sep 20 10:13:25 2007 +0100
     1.3 @@ -197,9 +197,6 @@ CPPFLAGS+=-D_GNU_SOURCE
     1.4  LIBS+=-lm
     1.5  LIBS+=-L../../libxc -lxenctrl -lxenguest
     1.6  LIBS+=-L../../xenstore -lxenstore
     1.7 -ifdef CONFIG_PASSTHROUGH
     1.8 -LIBS+=-lpci
     1.9 -endif
    1.10  ifndef CONFIG_USER_ONLY
    1.11  LIBS+=-lz
    1.12  endif
    1.13 @@ -351,6 +348,16 @@ ifdef CONFIG_WIN32
    1.14  VL_OBJS+=tap-win32.o
    1.15  endif
    1.16  
    1.17 +ifeq (,$(wildcard /usr/include/pci))
     1.18 +$(warning *** pciutils-devel package not found - missing /usr/include/pci)
    1.19 +$(warning *** PCI passthrough capability has been disabled)
    1.20 +else
    1.21 +LIBS+=-lpci
    1.22 +VL_OBJS+= pass-through.o
    1.23 +CFLAGS += -DCONFIG_PASSTHROUGH
    1.24 +$(info *** PCI passthrough capability has been enabled ***)
    1.25 +endif
    1.26 +
    1.27  SOUND_HW = sb16.o es1370.o
    1.28  AUDIODRV = audio.o noaudio.o wavaudio.o
    1.29  ifdef CONFIG_SDL
    1.30 @@ -403,9 +410,6 @@ VL_OBJS+= piix4acpi.o
    1.31  VL_OBJS+= xenstore.o
    1.32  VL_OBJS+= xen_platform.o
    1.33  VL_OBJS+= tpm_tis.o
    1.34 -ifdef CONFIG_PASSTHROUGH
    1.35 -VL_OBJS+= pass-through.o
    1.36 -endif
    1.37  CPPFLAGS += -DHAS_AUDIO
    1.38  endif
    1.39  ifeq ($(TARGET_BASE_ARCH), ppc)
     2.1 --- a/xen/arch/x86/domctl.c	Wed Sep 19 17:44:23 2007 +0100
     2.2 +++ b/xen/arch/x86/domctl.c	Thu Sep 20 10:13:25 2007 +0100
     2.3 @@ -25,6 +25,8 @@
     2.4  #include <asm/hvm/support.h>
     2.5  #include <asm/processor.h>
     2.6  #include <xsm/xsm.h>
     2.7 +#include <xen/list.h>
     2.8 +#include <asm/iommu.h>
     2.9  
    2.10  long arch_do_domctl(
    2.11      struct xen_domctl *domctl,
    2.12 @@ -523,6 +525,155 @@ long arch_do_domctl(
    2.13      }
    2.14      break;
    2.15  
    2.16 +    case XEN_DOMCTL_assign_device:
    2.17 +    {
    2.18 +        struct domain *d;
    2.19 +        struct hvm_iommu *hd;
    2.20 +        u8 bus, devfn;
    2.21 +
    2.22 +        if (!vtd_enabled)
    2.23 +            break;
    2.24 +
    2.25 +        ret = -EINVAL;
    2.26 +        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) ) {
    2.27 +            gdprintk(XENLOG_ERR,
    2.28 +                "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n"); 
    2.29 +            break;
    2.30 +        }
    2.31 +        hd = domain_hvm_iommu(d);
    2.32 +        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
    2.33 +        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
    2.34 +        ret = assign_device(d, bus, devfn);
    2.35 +        gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
    2.36 +            bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
    2.37 +        put_domain(d);
    2.38 +    }
    2.39 +    break;
    2.40 +
    2.41 +    case XEN_DOMCTL_bind_pt_irq:
    2.42 +    {
    2.43 +        struct domain * d;
    2.44 +        xen_domctl_bind_pt_irq_t * bind;
    2.45 +
    2.46 +        ret = -ESRCH;
    2.47 +        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
    2.48 +            break;
    2.49 +        bind = &(domctl->u.bind_pt_irq);
    2.50 +        if (vtd_enabled)
    2.51 +            ret = pt_irq_create_bind_vtd(d, bind);
    2.52 +        if (ret < 0)
    2.53 +            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
    2.54 +        rcu_unlock_domain(d);
    2.55 +    }
    2.56 +    break;    
    2.57 +
    2.58 +    case XEN_DOMCTL_memory_mapping:
    2.59 +    {
    2.60 +        struct domain *d;
    2.61 +        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
    2.62 +        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
    2.63 +        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
    2.64 +        int i;
    2.65 +
    2.66 +        ret = -EINVAL;
    2.67 +        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
    2.68 +            break;
    2.69 +
    2.70 +        ret = -ESRCH;
    2.71 +        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
    2.72 +            break;
    2.73 +
    2.74 +        ret=0;        
    2.75 +        if ( domctl->u.memory_mapping.add_mapping ) 
    2.76 +        {
    2.77 +            gdprintk(XENLOG_INFO,
    2.78 +                "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
    2.79 +                gfn, mfn, nr_mfns);   
    2.80 +            
    2.81 +            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
    2.82 +            for ( i = 0; i < nr_mfns; i++ )
    2.83 +                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i)); 
    2.84 +        }
    2.85 +        else 
    2.86 +        {
    2.87 +            gdprintk(XENLOG_INFO,
    2.88 +                "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
    2.89 +                 gfn, mfn, nr_mfns);
    2.90 +
    2.91 +            for ( i = 0; i < nr_mfns; i++ )
    2.92 +                clear_mmio_p2m_entry(d, gfn+i); 
    2.93 +            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
    2.94 +        }
    2.95 +
    2.96 +        rcu_unlock_domain(d);
    2.97 +    }
    2.98 +    break;
    2.99 +
   2.100 +    case XEN_DOMCTL_ioport_mapping:
   2.101 +    {
   2.102 +#define MAX_IOPORTS    0x10000
   2.103 +        struct domain *d;
   2.104 +        struct hvm_iommu *hd;
   2.105 +        unsigned int fgp = domctl->u.ioport_mapping.first_gport;
   2.106 +        unsigned int fmp = domctl->u.ioport_mapping.first_mport;
   2.107 +        unsigned int np = domctl->u.ioport_mapping.nr_ports;
   2.108 +        struct g2m_ioport *g2m_ioport;
   2.109 +        int found = 0;
   2.110 +
   2.111 +        ret = -EINVAL;
   2.112 +        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
   2.113 +            ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
   2.114 +        {
   2.115 +            gdprintk(XENLOG_ERR,
   2.116 +                "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
   2.117 +                fgp, fmp, np);
   2.118 +            break;
   2.119 +        }
   2.120 +
   2.121 +        ret = -ESRCH;
   2.122 +        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
   2.123 +            break;
   2.124 +
   2.125 +        hd = domain_hvm_iommu(d);
   2.126 +        if ( domctl->u.ioport_mapping.add_mapping )
   2.127 +        {
   2.128 +            gdprintk(XENLOG_INFO,
   2.129 +                "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
   2.130 +                fgp, fmp, np);
   2.131 +                
   2.132 +            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
   2.133 +                if (g2m_ioport->mport == fmp ) {
   2.134 +                    g2m_ioport->gport = fgp;
   2.135 +                    g2m_ioport->np = np;                    
   2.136 +                    found = 1;
   2.137 +                    break;
   2.138 +                }
   2.139 +            if ( !found ) 
   2.140 +            {                 
   2.141 +                g2m_ioport = xmalloc(struct g2m_ioport);
   2.142 +                g2m_ioport->gport = fgp;
   2.143 +                g2m_ioport->mport = fmp;
   2.144 +                g2m_ioport->np = np;
   2.145 +                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
   2.146 +            } 
   2.147 +            ret = ioports_permit_access(d, fmp, fmp + np - 1);
   2.148 +            
   2.149 +        }
   2.150 +        else {
   2.151 +            gdprintk(XENLOG_INFO,
   2.152 +                "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
   2.153 +                fgp, fmp, np);
   2.154 +            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
   2.155 +                if ( g2m_ioport->mport == fmp ) {
   2.156 +                    list_del(&g2m_ioport->list);
   2.157 +                    break;
   2.158 +                }
   2.159 +            ret = ioports_deny_access(d, fmp, fmp + np - 1);
   2.160 +        }
   2.161 +        rcu_unlock_domain(d);
   2.162 +    }
   2.163 +    break;    
   2.164 +
   2.165      default:
   2.166          ret = -ENOSYS;
   2.167          break;
     3.1 --- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c	Wed Sep 19 17:44:23 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c	Thu Sep 20 10:13:25 2007 +0100
     3.3 @@ -27,6 +27,7 @@
     3.4  #include <xen/domain_page.h>
     3.5  #include <asm/delay.h>
     3.6  #include <asm/string.h>
     3.7 +#include <asm/mm.h>
     3.8  #include <asm/iommu.h>
     3.9  #include <asm/hvm/vmx/intel-iommu.h>
    3.10  #include "dmar.h"
    3.11 @@ -1669,6 +1670,7 @@ int iommu_setup(void)
    3.12      struct hvm_iommu *hd  = domain_hvm_iommu(dom0);
    3.13      struct acpi_drhd_unit *drhd;
    3.14      struct iommu *iommu;
    3.15 +    unsigned long i;
    3.16  
    3.17      if (!vtd_enabled)
    3.18          return 0;
    3.19 @@ -1687,8 +1689,9 @@ int iommu_setup(void)
    3.20      drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
    3.21      iommu = drhd->iommu;
    3.22  
    3.23 -    hd->pgd = (struct dma_pte *)alloc_xenheap_page();
    3.24 -    memset((u8*)hd->pgd, 0, PAGE_SIZE);
    3.25 +    /* setup 1:1 page table for dom0 */
    3.26 +    for (i = 0; i < max_page; i++)
    3.27 +        iommu_map_page(dom0, i, i);
    3.28  
    3.29      if (init_vtd_hw())
    3.30          goto error;
     4.1 --- a/xen/arch/x86/mm/p2m.c	Wed Sep 19 17:44:23 2007 +0100
     4.2 +++ b/xen/arch/x86/mm/p2m.c	Thu Sep 20 10:13:25 2007 +0100
     4.3 @@ -27,6 +27,7 @@
     4.4  #include <asm/page.h>
     4.5  #include <asm/paging.h>
     4.6  #include <asm/p2m.h>
     4.7 +#include <asm/iommu.h>
     4.8  
     4.9  /* Debugging and auditing of the P2M code? */
    4.10  #define P2M_AUDIT     0
    4.11 @@ -244,7 +245,7 @@ set_p2m_entry(struct domain *d, unsigned
    4.12      if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) )
    4.13          d->arch.p2m.max_mapped_pfn = gfn;
    4.14  
    4.15 -    if ( mfn_valid(mfn) )
    4.16 +    if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct) )
    4.17          entry_content = l1e_from_pfn(mfn_x(mfn), p2m_type_to_flags(p2mt));
    4.18      else
    4.19          entry_content = l1e_empty();
    4.20 @@ -252,6 +253,9 @@ set_p2m_entry(struct domain *d, unsigned
    4.21      /* level 1 entry */
    4.22      paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
    4.23  
    4.24 +    if ( vtd_enabled && (p2mt == p2m_mmio_direct) && is_hvm_domain(d) )
    4.25 +        iommu_flush(d, gfn, (u64*)p2m_entry);
    4.26 +
    4.27      /* Success */
    4.28      rv = 1;
    4.29  
    4.30 @@ -351,6 +355,11 @@ int p2m_alloc_table(struct domain *d,
    4.31              goto error;
    4.32      }
    4.33  
    4.34 +#if CONFIG_PAGING_LEVELS >= 3
    4.35 +    if (vtd_enabled && is_hvm_domain(d))
    4.36 +        iommu_set_pgd(d);
    4.37 +#endif
    4.38 +
    4.39      P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
    4.40      p2m_unlock(d);
    4.41      return 0;
    4.42 @@ -860,6 +869,42 @@ p2m_type_t p2m_change_type(struct domain
    4.43      return pt;
    4.44  }
    4.45  
    4.46 +int
    4.47 +set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
    4.48 +{
    4.49 +    int rc = 0;
    4.50 +
    4.51 +    rc = set_p2m_entry(d, gfn, mfn, p2m_mmio_direct);
    4.52 +    if ( 0 == rc )
    4.53 +        gdprintk(XENLOG_ERR,
    4.54 +            "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
    4.55 +            gmfn_to_mfn(d, gfn));
    4.56 +    return rc;
    4.57 +}
    4.58 +
    4.59 +int
    4.60 +clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
    4.61 +{
    4.62 +    int rc = 0;
    4.63 +
    4.64 +    unsigned long mfn;
    4.65 +    mfn = gmfn_to_mfn(d, gfn);
    4.66 +    if ( INVALID_MFN == mfn )
    4.67 +    {
    4.68 +        gdprintk(XENLOG_ERR,
    4.69 +            "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
    4.70 +        return 0;
    4.71 +    }
    4.72 +    rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), 0);
    4.73 +
    4.74 +#if !defined(__x86_64__)
    4.75 +    /* x86_64 xen does not map mmio entries in machine_to_phys_map[] */
    4.76 +    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
    4.77 +#endif
    4.78 +
    4.79 +    return rc;
    4.80 +}
    4.81 +
    4.82  /*
    4.83   * Local variables:
    4.84   * mode: C
     5.1 --- a/xen/arch/x86/mm/shadow/multi.c	Wed Sep 19 17:44:23 2007 +0100
     5.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Thu Sep 20 10:13:25 2007 +0100
     5.3 @@ -685,7 +685,7 @@ static always_inline void
     5.4      /* N.B. For pass-through MMIO, either this test needs to be relaxed,
     5.5       * and shadow_set_l1e() trained to handle non-valid MFNs (ugh), or the
     5.6       * MMIO areas need to be added to the frame-table to make them "valid". */
     5.7 -    if ( !mfn_valid(target_mfn) )
     5.8 +    if ( !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) )
     5.9      {
    5.10          ASSERT((ft == ft_prefetch));
    5.11          *sp = shadow_l1e_empty();
     6.1 --- a/xen/include/asm-x86/p2m.h	Wed Sep 19 17:44:23 2007 +0100
     6.2 +++ b/xen/include/asm-x86/p2m.h	Thu Sep 20 10:13:25 2007 +0100
     6.3 @@ -222,6 +222,10 @@ void p2m_change_type_global(struct domai
     6.4  p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
     6.5                             p2m_type_t ot, p2m_type_t nt);
     6.6  
     6.7 +/* Set mmio addresses in the p2m table (for pass-through) */
     6.8 +int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
     6.9 +int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
    6.10 +
    6.11  #endif /* _XEN_P2M_H */
    6.12  
    6.13  /*