#include <asm/hvm/support.h>
#include <asm/processor.h>
#include <xsm/xsm.h>
+#include <xen/list.h>
+#include <asm/iommu.h>
long arch_do_domctl(
    struct xen_domctl *domctl,
    }
    break;
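+
+    /*
+     * Assign a PCI device (bus/devfn packed into machine_bdf) to a domain,
+     * placing it under the domain's VT-d DMA-remapping context.
+     */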
+    case XEN_DOMCTL_assign_device:
+    {
+        struct domain *d;
+        u8 bus, devfn;
+
+        ret = -ENODEV;
+        if ( !vtd_enabled )
+            break;
+
+        ret = -EINVAL;
+        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
+        {
+            gdprintk(XENLOG_ERR,
+                "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
+            break;
+        }
+
+        /* machine_bdf packs the PCI bus into bits 23:16 and devfn into 15:8. */
+        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
+        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
+        ret = assign_device(d, bus, devfn);
+        gdprintk(XENLOG_INFO, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
+                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+        put_domain(d);
+    }
+    break;
+
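+    /*
+     * Bind a passthrough device interrupt into the guest: with VT-d
+     * enabled, the machine IRQ described by bind_pt_irq is routed to the
+     * domain via pt_irq_create_bind_vtd().
+     */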
+    case XEN_DOMCTL_bind_pt_irq:
+    {
+        struct domain *d;
+        xen_domctl_bind_pt_irq_t *bind;
+
+        ret = -ESRCH;
+        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
+            break;
+
+        bind = &(domctl->u.bind_pt_irq);
+        if ( vtd_enabled )
+            ret = pt_irq_create_bind_vtd(d, bind);
+        if ( ret < 0 )
+            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
+        rcu_unlock_domain(d);
+    }
+    break;
+
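+    /*
+     * Establish (or tear down) a direct MMIO mapping: nr_mfns machine
+     * frames starting at first_mfn are mapped 1:1 into the guest physmap
+     * at first_gfn, and iomem access permissions are updated to match.
+     */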
+    case XEN_DOMCTL_memory_mapping:
+    {
+        struct domain *d;
+        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
+        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
+        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
+        unsigned long i;
+
+        ret = -EINVAL;
+        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
+            break;
+
+        ret = -ESRCH;
+        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
+            break;
+
+        ret = 0;
+        if ( domctl->u.memory_mapping.add_mapping )
+        {
+            gdprintk(XENLOG_INFO,
+                "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
+                gfn, mfn, nr_mfns);
+
+            /* Grant iomem access, then establish the 1:1 p2m MMIO mappings. */
+            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
+            for ( i = 0; i < nr_mfns; i++ )
+                set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i));
+        }
+        else
+        {
+            gdprintk(XENLOG_INFO,
+                "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
+                gfn, mfn, nr_mfns);
+
+            /* Tear down the p2m mappings before revoking iomem access. */
+            for ( i = 0; i < nr_mfns; i++ )
+                clear_mmio_p2m_entry(d, gfn + i);
+            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
+        }
+
+        rcu_unlock_domain(d);
+    }
+    break;
+
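+    /*
+     * Add or remove a guest-to-machine I/O port mapping, and update the
+     * domain's permission to access the underlying machine port range.
+     */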
+    case XEN_DOMCTL_ioport_mapping:
+    {
+#define MAX_IOPORTS 0x10000
+        struct domain *d;
+        struct hvm_iommu *hd;
+        unsigned int fgp = domctl->u.ioport_mapping.first_gport;
+        unsigned int fmp = domctl->u.ioport_mapping.first_mport;
+        unsigned int np = domctl->u.ioport_mapping.nr_ports;
+        struct g2m_ioport *g2m_ioport;
+        int found = 0;
+
+        ret = -EINVAL;
+        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
+             ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
+        {
+            gdprintk(XENLOG_ERR,
+                "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
+                fgp, fmp, np);
+            break;
+        }
+
+        ret = -ESRCH;
+        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
+            break;
+
+        hd = domain_hvm_iommu(d);
+        if ( domctl->u.ioport_mapping.add_mapping )
+        {
+            gdprintk(XENLOG_INFO,
+                "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
+                fgp, fmp, np);
+
+            /* Update an existing mapping for this machine port if there is
+             * one; otherwise allocate and queue a new g2m_ioport entry. */
+            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+                if ( g2m_ioport->mport == fmp )
+                {
+                    g2m_ioport->gport = fgp;
+                    g2m_ioport->np = np;
+                    found = 1;
+                    break;
+                }
+            if ( !found )
+            {
+                g2m_ioport = xmalloc(struct g2m_ioport);
+                if ( g2m_ioport == NULL )
+                {
+                    ret = -ENOMEM;
+                    rcu_unlock_domain(d);
+                    break;
+                }
+                g2m_ioport->gport = fgp;
+                g2m_ioport->mport = fmp;
+                g2m_ioport->np = np;
+                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
+            }
+            ret = ioports_permit_access(d, fmp, fmp + np - 1);
+        }
+        else
+        {
+            gdprintk(XENLOG_INFO,
+                "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
+                fgp, fmp, np);
+
+            /* Drop and free the mapping for this machine port, if any. */
+            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
+                if ( g2m_ioport->mport == fmp )
+                {
+                    list_del(&g2m_ioport->list);
+                    xfree(g2m_ioport);
+                    break;
+                }
+            ret = ioports_deny_access(d, fmp, fmp + np - 1);
+        }
+        rcu_unlock_domain(d);
+    }
+    break;
+
    default:
        ret = -ENOSYS;
        break;
#include <asm/page.h>
#include <asm/paging.h>
#include <asm/p2m.h>
+#include <asm/iommu.h>
/* Debugging and auditing of the P2M code? */
#define P2M_AUDIT 0
    if ( mfn_valid(mfn) && (gfn > d->arch.p2m.max_mapped_pfn) )
        d->arch.p2m.max_mapped_pfn = gfn;
-    if ( mfn_valid(mfn) )
+    if ( mfn_valid(mfn) || (p2mt == p2m_mmio_direct) )
        entry_content = l1e_from_pfn(mfn_x(mfn), p2m_type_to_flags(p2mt));
    else
        entry_content = l1e_empty();
    /* level 1 entry */
    paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
+
+    /* Keep the VT-d tables in sync for direct MMIO mappings. */
+    if ( vtd_enabled && (p2mt == p2m_mmio_direct) && is_hvm_domain(d) )
+        iommu_flush(d, gfn, (u64*)p2m_entry);
+
/* Success */
rv = 1;
goto error;
}
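+
+    /* Let VT-d enabled HVM domains pick up the newly initialised p2m
+     * table as their IOMMU page directory. */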
+#if CONFIG_PAGING_LEVELS >= 3
+    if ( vtd_enabled && is_hvm_domain(d) )
+        iommu_set_pgd(d);
+#endif
+
P2M_PRINTK("p2m table initialised (%u pages)\n", page_count);
p2m_unlock(d);
return 0;
return pt;
}
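+
+/*
+ * Map a single machine frame into the guest physmap as direct MMIO
+ * (p2m_mmio_direct), e.g. for a passthrough device's MMIO ranges.
+ * Returns the set_p2m_entry() result: nonzero on success, 0 on failure.
+ */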
+int
+set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
+{
+    int rc;
+
+    rc = set_p2m_entry(d, gfn, mfn, p2m_mmio_direct);
+    if ( rc == 0 )
+        gdprintk(XENLOG_ERR,
+            "set_mmio_p2m_entry: set_p2m_entry failed! gfn=%lx mfn=%08lx\n",
+            gfn, mfn_x(mfn));
+    return rc;
+}
+
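+/*
+ * Remove a direct MMIO mapping previously installed with
+ * set_mmio_p2m_entry() and invalidate the p2m entry.
+ */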
+int
+clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
+{
+    int rc;
+    unsigned long mfn;
+
+    mfn = gmfn_to_mfn(d, gfn);
+    if ( mfn == INVALID_MFN )
+    {
+        gdprintk(XENLOG_ERR,
+            "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
+        return 0;
+    }
+    rc = set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid);
+
+#if !defined(__x86_64__)
+    /* x86_64 Xen does not map mmio entries in machine_to_phys_mapping[] */
+    set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
+#endif
+
+    return rc;
+}
+
/*
* Local variables:
* mode: C