This is done so PVH guests can use PHYSDEVOP_pirq_eoi_gmfn_v{1/2}.
Update users of these fields to reflect that they have been moved and
are now also available to other kinds of guests.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Move auto_unmask ahead of the other two fields, to reduce padding.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
*/
destroy_gdt(v);
}
+ }
- if ( d->arch.pv_domain.pirq_eoi_map != NULL )
- {
- unmap_domain_page_global(d->arch.pv_domain.pirq_eoi_map);
- put_page_and_type(
- mfn_to_page(d->arch.pv_domain.pirq_eoi_map_mfn));
- d->arch.pv_domain.pirq_eoi_map = NULL;
- d->arch.pv_domain.auto_unmask = 0;
- }
+ if ( d->arch.pirq_eoi_map != NULL )
+ {
+ unmap_domain_page_global(d->arch.pirq_eoi_map);
+ put_page_and_type(mfn_to_page(d->arch.pirq_eoi_map_mfn));
+ d->arch.pirq_eoi_map = NULL;
+ d->arch.auto_unmask = 0;
}
d->arch.relmem = RELMEM_shared;
case PHYSDEVOP_irq_status_query:
case PHYSDEVOP_get_free_pirq:
return do_physdev_op(cmd, arg);
-
- /* pvh fixme: coming soon */
- case PHYSDEVOP_pirq_eoi_gmfn_v1:
- case PHYSDEVOP_pirq_eoi_gmfn_v2:
- return -ENOSYS;
-
}
}
static inline void set_pirq_eoi(struct domain *d, unsigned int irq)
{
- if ( is_pv_domain(d) && d->arch.pv_domain.pirq_eoi_map )
- set_bit(irq, d->arch.pv_domain.pirq_eoi_map);
+ if ( d->arch.pirq_eoi_map )
+ set_bit(irq, d->arch.pirq_eoi_map);
}
static inline void clear_pirq_eoi(struct domain *d, unsigned int irq)
{
- if ( is_pv_domain(d) && d->arch.pv_domain.pirq_eoi_map )
- clear_bit(irq, d->arch.pv_domain.pirq_eoi_map);
+ if ( d->arch.pirq_eoi_map )
+ clear_bit(irq, d->arch.pirq_eoi_map);
}
static void set_eoi_ready(void *data);
spin_unlock(&v->domain->event_lock);
break;
}
- if ( is_pv_domain(v->domain) &&
- v->domain->arch.pv_domain.auto_unmask )
+ if ( v->domain->arch.auto_unmask )
evtchn_unmask(pirq->evtchn);
if ( is_pv_domain(v->domain) ||
domain_pirq_to_irq(v->domain, eoi.irq) > 0 )
}
mfn = page_to_mfn(page);
- if ( cmpxchg(&v->domain->arch.pv_domain.pirq_eoi_map_mfn,
+ if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn,
0, mfn) != 0 )
{
put_page_and_type(mfn_to_page(mfn));
break;
}
- v->domain->arch.pv_domain.pirq_eoi_map = map_domain_page_global(mfn);
- if ( v->domain->arch.pv_domain.pirq_eoi_map == NULL )
+ v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
+ if ( v->domain->arch.pirq_eoi_map == NULL )
{
- v->domain->arch.pv_domain.pirq_eoi_map_mfn = 0;
+ v->domain->arch.pirq_eoi_map_mfn = 0;
put_page_and_type(mfn_to_page(mfn));
ret = -ENOSPC;
break;
}
if ( cmd == PHYSDEVOP_pirq_eoi_gmfn_v1 )
- v->domain->arch.pv_domain.auto_unmask = 1;
+ v->domain->arch.auto_unmask = 1;
ret = 0;
break;
{
l1_pgentry_t **gdt_ldt_l1tab;
- /* Shared page for notifying that explicit PIRQ EOI is required. */
- unsigned long *pirq_eoi_map;
- unsigned long pirq_eoi_map_mfn;
- /* set auto_unmask to 1 if you want PHYSDEVOP_eoi to automatically
- * unmask the event channel */
- bool_t auto_unmask;
-
/* map_domain_page() mapping cache. */
struct mapcache_domain mapcache;
};
spinlock_t e820_lock;
struct e820entry *e820;
unsigned int nr_e820;
+
+ /* set auto_unmask to 1 if you want PHYSDEVOP_eoi to automatically
+ * unmask the event channel */
+ bool_t auto_unmask;
+ /* Shared page for notifying that explicit PIRQ EOI is required. */
+ unsigned long *pirq_eoi_map;
+ unsigned long pirq_eoi_map_mfn;
} __cacheline_aligned;
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))