case XEN_DOMCTL_ioport_mapping:
{
- struct domain_iommu *hd;
unsigned int fgp = domctl->u.ioport_mapping.first_gport;
unsigned int fmp = domctl->u.ioport_mapping.first_mport;
unsigned int np = domctl->u.ioport_mapping.nr_ports;
unsigned int add = domctl->u.ioport_mapping.add_mapping;
+ struct hvm_domain *hvm_domain;
struct g2m_ioport *g2m_ioport;
int found = 0;
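+
+        /* The mapping list now lives in hvm_domain, so only HVM guests qualify. */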
+ ret = -EOPNOTSUPP;
+ if ( !is_hvm_domain(d) )
+ {
+ printk(XENLOG_G_ERR "ioport_map against non-HVM domain\n");
+ break;
+ }
+
        ret = -EINVAL;
        if ( ((fgp | fmp | (np - 1)) >= MAX_IOPORTS) ||
             ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
            break;

        ret = xsm_ioport_mapping(XSM_HOOK, d, fmp, fmp + np - 1, add);
        if ( ret )
            break;
- hd = dom_iommu(d);
+ hvm_domain = &d->arch.hvm_domain;
if ( add )
{
printk(XENLOG_G_INFO
"ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
d->domain_id, fgp, fmp, np);
            /* Reuse an existing entry for this machine port, else allocate one. */
-            list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
+            list_for_each_entry(g2m_ioport, &hvm_domain->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    g2m_ioport->gport = fgp;
                    g2m_ioport->np = np;
                    found = 1;
                    break;
                }
            if ( !found )
            {
                g2m_ioport = xmalloc(struct g2m_ioport);
                if ( !g2m_ioport )
                {
                    ret = -ENOMEM;
                    break;
                }
                g2m_ioport->gport = fgp;
                g2m_ioport->mport = fmp;
                g2m_ioport->np = np;
-                list_add_tail(&g2m_ioport->list, &hd->arch.g2m_ioport_list);
+                list_add_tail(&g2m_ioport->list, &hvm_domain->g2m_ioport_list);
            }
            if ( !ret )
                ret = ioports_permit_access(d, fmp, fmp + np - 1);
        }
        else
        {
            printk(XENLOG_G_INFO
                   "ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
                   d->domain_id, fgp, fmp, np);
- list_for_each_entry(g2m_ioport, &hd->arch.g2m_ioport_list, list)
+ list_for_each_entry(g2m_ioport, &hvm_domain->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    list_del(&g2m_ioport->list);
                    xfree(g2m_ioport);
                    break;
                }
            ret = ioports_deny_access(d, fmp, fmp + np - 1);
        }
spin_lock_init(&d->arch.hvm_domain.uc_lock);
spin_lock_init(&d->arch.hvm_domain.write_map.lock);
INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
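+    /* Guest-to-machine I/O port mappings; torn down in hvm_domain_destroy(). */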
+ INIT_LIST_HEAD(&d->arch.hvm_domain.g2m_ioport_list);
hvm_init_cacheattr_region_list(d);
void hvm_domain_destroy(struct domain *d)
{
+ struct list_head *ioport_list, *tmp;
+ struct g2m_ioport *ioport;
+
xfree(d->arch.hvm_domain.io_handler);
d->arch.hvm_domain.io_handler = NULL;
xfree(d->arch.hvm_domain.irq);
d->arch.hvm_domain.irq = NULL;
+
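+    /* Free any guest-to-machine I/O port mappings still on the list. */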
+ list_for_each_safe ( ioport_list, tmp,
+ &d->arch.hvm_domain.g2m_ioport_list )
+ {
+ ioport = list_entry(ioport_list, struct g2m_ioport, list);
+ list_del(&ioport->list);
+ xfree(ioport);
+ }
}
static int hvm_save_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
const ioreq_t *p)
{
struct vcpu *curr = current;
- const struct domain_iommu *dio = dom_iommu(curr->domain);
+ const struct hvm_domain *hvm_domain = &curr->domain->arch.hvm_domain;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
struct g2m_ioport *g2m_ioport;
unsigned int start, end;
- list_for_each_entry( g2m_ioport, &dio->arch.g2m_ioport_list, list )
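+    /* Accept only ports that fall inside one of the mapped guest ranges. */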
+ list_for_each_entry( g2m_ioport, &hvm_domain->g2m_ioport_list, list )
{
start = g2m_ioport->gport;
end = start + g2m_ioport->np;
struct domain_iommu *hd = dom_iommu(d);
spin_lock_init(&hd->arch.mapping_lock);
- INIT_LIST_HEAD(&hd->arch.g2m_ioport_list);
INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);
return 0;
void arch_iommu_domain_destroy(struct domain *d)
{
- const struct domain_iommu *hd = dom_iommu(d);
- struct list_head *ioport_list, *tmp;
- struct g2m_ioport *ioport;
-
- list_for_each_safe ( ioport_list, tmp, &hd->arch.g2m_ioport_list )
- {
- ioport = list_entry(ioport_list, struct g2m_ioport, list);
- list_del(&ioport->list);
- xfree(ioport);
- }
}
unsigned long *io_bitmap;
+    /* List of guest-to-machine I/O port mappings. */
+ struct list_head g2m_ioport_list;
+
/* List of permanently write-mapped pages. */
struct {
spinlock_t lock;
u64 pgd_maddr; /* io page directory machine address */
spinlock_t mapping_lock; /* io page table lock */
int agaw; /* adjusted guest address width, 0 is level 2 30-bit */
- struct list_head g2m_ioport_list; /* guest to machine ioport mapping */
u64 iommu_bitmap; /* bitmap of iommu(s) that the domain uses */
struct list_head mapped_rmrrs;
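/*
 * For reference (not part of this diff): the nodes linked on
 * g2m_ioport_list are struct g2m_ioport entries, along these lines
 * (field order assumed from the accesses above):
 *
 *     struct g2m_ioport {
 *         struct list_head list;  // linkage on g2m_ioport_list
 *         unsigned int gport;     // first guest port
 *         unsigned int mport;     // first machine port
 *         unsigned int np;        // number of ports in the range
 *     };
 */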