xenbits.xensource.com Git - xen.git/commitdiff
domctl and p2m changes for PCI passthru.
Author: Keir Fraser <keir@xensource.com>
Thu, 20 Sep 2007 08:57:10 +0000 (09:57 +0100)
Committer: Keir Fraser <keir@xensource.com>
Thu, 20 Sep 2007 08:57:10 +0000 (09:57 +0100)
Signed-off-by: Allen Kay <allen.m.kay@intel.com>
Signed-off-by: Guy Zana <guy@neocleus.com>
xen/arch/x86/domctl.c
xen/arch/x86/mm/p2m.c
xen/arch/x86/mm/shadow/multi.c
xen/include/asm-x86/p2m.h

index 6d96510a8108483260b7d314125453861111884e..7a1547c9498b78b2fd8b4b8d07e5872159cbe407 100644 (file)
@@ -25,6 +25,8 @@
 #include <asm/hvm/support.h>
 #include <asm/processor.h>
 #include <xsm/xsm.h>
+#include <xen/list.h>
+#include <asm/iommu.h>
 
 long arch_do_domctl(
     struct xen_domctl *domctl,
@@ -523,6 +525,155 @@ long arch_do_domctl(
     }
     break;
 
+    /* Assign the physical PCI device encoded in machine_bdf to a domain
+     * for VT-d passthrough. */
+    case XEN_DOMCTL_assign_device:
+    {
+        struct domain *d;
+        struct hvm_iommu *hd;
+        u8 bus, devfn;
+
+        /* No-op when VT-d is absent.  NOTE(review): 'ret' is left at
+         * whatever value it had before this case -- confirm callers do
+         * not need an explicit error code here. */
+        if (!vtd_enabled)
+            break;
+
+        ret = -EINVAL;
+        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) ) {
+            gdprintk(XENLOG_ERR,
+                "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n"); 
+            break;
+        }
+        /* NOTE(review): 'hd' is fetched but never used in this case. */
+        hd = domain_hvm_iommu(d);
+        /* machine_bdf layout: bus in bits 23:16, devfn in bits 15:8. */
+        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
+        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
+        ret = assign_device(d, bus, devfn);
+        /* Diagnostic printed unconditionally, even on success. */
+        gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
+            bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+        put_domain(d);
+    }
+    break;
+
+    /* Bind a passed-through device's machine interrupt to the guest's
+     * virtual interrupt via the VT-d binding tables. */
+    case XEN_DOMCTL_bind_pt_irq:
+    {
+        struct domain * d;
+        xen_domctl_bind_pt_irq_t * bind;
+
+        ret = -ESRCH;
+        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
+            break;
+        bind = &(domctl->u.bind_pt_irq);
+        if (vtd_enabled)
+            ret = pt_irq_create_bind_vtd(d, bind);
+        /* NOTE(review): if VT-d is disabled, ret remains -ESRCH and the
+         * misleading failure message below is still printed -- confirm
+         * this is the intended behaviour. */
+        if (ret < 0)
+            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
+        rcu_unlock_domain(d);
+    }
+    break;    
+
+    /* Add or remove a direct MMIO mapping: map machine frames
+     * [mfn, mfn + nr_mfns) at guest frames [gfn, gfn + nr_mfns), and
+     * grant/deny the domain iomem access to the machine range. */
+    case XEN_DOMCTL_memory_mapping:
+    {
+        struct domain *d;
+        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
+        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
+        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
+        int i;
+
+        /* NOTE(review): only the mfn range is checked for wrap-around;
+         * the gfn range is not -- confirm whether that needs a check. */
+        ret = -EINVAL;
+        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
+            break;
+
+        ret = -ESRCH;
+        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
+            break;
+
+        ret=0;        
+        if ( domctl->u.memory_mapping.add_mapping ) 
+        {
+            gdprintk(XENLOG_INFO,
+                "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
+                gfn, mfn, nr_mfns);   
+            
+            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
+            /* NOTE(review): set_mmio_p2m_entry()'s return value is
+             * ignored, so partial p2m failures are not reported. */
+            for ( i = 0; i < nr_mfns; i++ )
+                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i)); 
+        }
+        else 
+        {
+            gdprintk(XENLOG_INFO,
+                "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
+                 gfn, mfn, nr_mfns);
+
+            /* Tear down the p2m entries first, then revoke iomem access. */
+            for ( i = 0; i < nr_mfns; i++ )
+                clear_mmio_p2m_entry(d, gfn+i); 
+            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+        }
+
+        rcu_unlock_domain(d);
+    }
+    break;
+
+    /* Add or remove a guest-port -> machine-port I/O mapping of np ports,
+     * tracked in the domain's g2m_ioport_list, and grant/deny access to
+     * the machine port range accordingly. */
+    case XEN_DOMCTL_ioport_mapping:
+    {
+#define MAX_IOPORTS    0x10000
+        struct domain *d;
+        struct hvm_iommu *hd;
+        unsigned int fgp = domctl->u.ioport_mapping.first_gport;
+        unsigned int fmp = domctl->u.ioport_mapping.first_mport;
+        unsigned int np = domctl->u.ioport_mapping.nr_ports;
+        struct g2m_ioport *g2m_ioport;
+        int found = 0;
+
+        /* Reject empty or out-of-range port windows. */
+        ret = -EINVAL;
+        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
+            ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
+        {
+            gdprintk(XENLOG_ERR,
+                "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
+                fgp, fmp, np);
+            break;
+        }
+
+        ret = -ESRCH;
+        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
+            break;
+
+        hd = domain_hvm_iommu(d);
+        if ( domctl->u.ioport_mapping.add_mapping )
+        {
+            gdprintk(XENLOG_INFO,
+                "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
+                fgp, fmp, np);
+                
+            /* Update in place if a mapping for this machine port exists. */
+            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+                if (g2m_ioport->mport == fmp ) {
+                    g2m_ioport->gport = fgp;
+                    g2m_ioport->np = np;                    
+                    found = 1;
+                    break;
+                }
+            if ( !found ) 
+            {                 
+                /* NOTE(review): xmalloc() result is not checked for NULL
+                 * before being dereferenced below. */
+                g2m_ioport = xmalloc(struct g2m_ioport);
+                g2m_ioport->gport = fgp;
+                g2m_ioport->mport = fmp;
+                g2m_ioport->np = np;
+                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
+            } 
+            ret = ioports_permit_access(d, fmp, fmp + np - 1);
+            
+        }
+        else {
+            gdprintk(XENLOG_INFO,
+                "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
+                fgp, fmp, np);
+            /* NOTE(review): the entry is unlinked but never freed here --
+             * confirm whether this leaks the g2m_ioport allocation. */
+            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+                if ( g2m_ioport->mport == fmp ) {
+                    list_del(&g2m_ioport->list);
+                    break;
+                }
+            ret = ioports_deny_access(d, fmp, fmp + np - 1);
+        }
+        rcu_unlock_domain(d);
+    }
+    break;    
+
     default:
         ret = -ENOSYS;
         break;
index 5081fd03b4bb1703017c0d6ad36a9c00cbb6f6bd..5569cbe632bcde3a3272007b6576ba72f579df1e 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/page.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
+#include <asm/iommu.h>
 
 /* Debugging and auditing of the P2M code? */
 #define P2M_AUDIT     0
@@ -244,7 +245,7 @@ set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt)
     if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) )
         d->arch.p2m.max_mapped_pfn = gfn;
 
-    if ( mfn_valid(mfn) )
+    if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct) )
         entry_content = l1e_from_pfn(mfn_x(mfn), p2m_type_to_flags(p2mt));
     else
         entry_content = l1e_empty();
@@ -252,6 +253,9 @@ set_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn, p2m_type_t p2mt)
     /* level 1 entry */
     paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
 
+    if ( vtd_enabled && (p2mt == p2m_mmio_direct) && is_hvm_domain(d) )
+        iommu_flush(d, gfn, (u64*)p2m_entry);
+
     /* Success */
     rv = 1;
 
@@ -351,6 +355,11 @@ int p2m_alloc_table(struct domain *d,
             goto error;
     }
 
+#if CONFIG_PAGING_LEVELS >= 3
+    if (vtd_enabled && is_hvm_domain(d))
+        iommu_set_pgd(d);
+#endif
+
     P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
     p2m_unlock(d);
     return 0;
@@ -860,6 +869,42 @@ p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
     return pt;
 }
 
+/*
+ * Map guest frame 'gfn' directly to machine frame 'mfn' in domain d's
+ * p2m table with type p2m_mmio_direct (PCI passthrough MMIO).
+ *
+ * Returns set_p2m_entry()'s result: nonzero on success, 0 on failure
+ * (a failure is also logged).
+ */
+int
+set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+{
+    int rc = 0;
+
+    rc = set_p2m_entry(d, gfn, mfn, p2m_mmio_direct);
+    if ( 0 == rc )
+        gdprintk(XENLOG_ERR,
+            "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+            gmfn_to_mfn(d, gfn));
+    return rc;
+}
+
+/*
+ * Remove the direct MMIO mapping at guest frame 'gfn' by writing an
+ * invalid entry into domain d's p2m table.
+ *
+ * Returns set_p2m_entry()'s result (nonzero on success); returns 0 if
+ * 'gfn' does not currently translate to a valid mfn.
+ */
+int
+clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
+{
+    int rc = 0;
+
+    unsigned long mfn;
+    mfn = gmfn_to_mfn(d, gfn);
+    if ( INVALID_MFN == mfn )
+    {
+        gdprintk(XENLOG_ERR,
+            "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
+        return 0;
+    }
+    /* NOTE(review): the p2m type is the literal 0 -- presumably the
+     * 'invalid'/default p2m_type_t value; confirm against the enum. */
+    rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), 0);
+
+#if !defined(__x86_64__)
+    /* x86_64 xen does not map mmio entries in machine_to_phys_map[] */
+    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+#endif
+
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
index df0892ec7b9271b659893b44174c344621742dbb..e1e2da694f7c29d0a9d229d6a714853fdff94f5c 100644 (file)
@@ -685,7 +685,7 @@ _sh_propagate(struct vcpu *v,
     /* N.B. For pass-through MMIO, either this test needs to be relaxed,
      * and shadow_set_l1e() trained to handle non-valid MFNs (ugh), or the
      * MMIO areas need to be added to the frame-table to make them "valid". */
-    if ( !mfn_valid(target_mfn) )
+    if ( !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) )
     {
         ASSERT((ft == ft_prefetch));
         *sp = shadow_l1e_empty();
index 649f4747f50541e4fddfcdb0995f3145c3541d39..c2801694281bd891d7b281410fe34cbcb81737f6 100644 (file)
@@ -222,6 +222,10 @@ void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
 p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
                            p2m_type_t ot, p2m_type_t nt);
 
+/* Set/clear direct MMIO addresses in the p2m table (PCI pass-through).
+ * Both return nonzero on success and 0 on failure. */
+int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
+int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
+
 #endif /* _XEN_P2M_H */
 
 /*