The trailing _domain suffix is redundant and needlessly adds to code volume. Drop it.
Reflow lines as appropriate, and switch to using the new XFREE/etc wrappers
where applicable.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Julien Grall <julien.grall@arm.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
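For reference, the XFREE/etc wrappers mentioned above free an allocation and
clear the stored pointer in one statement, which is why the explicit "= NULL"
assignments disappear where xfree() calls are converted below. A minimal
sketch of such a wrapper, assuming the usual xfree() interface from Xen's
xmalloc headers (the in-tree definition may differ in detail):

    /* Free a dynamically allocated object and clear the pointer to it. */
    #define XFREE(p) do { \
        xfree(p);         \
        (p) = NULL;       \
    } while ( false )

With that, for example,

    xfree(d->arch.hvm.io_handler);
    d->arch.hvm.io_handler = NULL;

collapses to a single

    XFREE(d->arch.hvm.io_handler);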
val |= MASK_INSR(HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_LOW_LEVEL,
HVM_PARAM_CALLBACK_TYPE_PPI_FLAG_MASK);
val |= d->arch.evtchn_irq;
- d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ] = val;
+ d->arch.hvm.params[HVM_PARAM_CALLBACK_IRQ] = val;
}
static void __init find_gnttab_region(struct domain *d,
if ( op == HVMOP_set_param )
{
- d->arch.hvm_domain.params[a.index] = a.value;
+ d->arch.hvm.params[a.index] = a.value;
}
else
{
- a.value = d->arch.hvm_domain.params[a.index];
+ a.value = d->arch.hvm.params[a.index];
rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
}
/* Need to determine if HAP is enabled before initialising paging */
if ( is_hvm_domain(d) )
- d->arch.hvm_domain.hap_enabled =
+ d->arch.hvm.hap_enabled =
hvm_hap_supported() && (config->flags & XEN_DOMCTL_CDF_hap);
if ( (rc = paging_domain_init(d, config->flags)) != 0 )
unsigned int fmp = domctl->u.ioport_mapping.first_mport;
unsigned int np = domctl->u.ioport_mapping.nr_ports;
unsigned int add = domctl->u.ioport_mapping.add_mapping;
- struct hvm_domain *hvm_domain;
+ struct hvm_domain *hvm;
struct g2m_ioport *g2m_ioport;
int found = 0;
if ( ret )
break;
- hvm_domain = &d->arch.hvm_domain;
+ hvm = &d->arch.hvm;
if ( add )
{
printk(XENLOG_G_INFO
"ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
d->domain_id, fgp, fmp, np);
- list_for_each_entry(g2m_ioport, &hvm_domain->g2m_ioport_list, list)
+ list_for_each_entry(g2m_ioport, &hvm->g2m_ioport_list, list)
if (g2m_ioport->mport == fmp )
{
g2m_ioport->gport = fgp;
g2m_ioport->gport = fgp;
g2m_ioport->mport = fmp;
g2m_ioport->np = np;
- list_add_tail(&g2m_ioport->list, &hvm_domain->g2m_ioport_list);
+ list_add_tail(&g2m_ioport->list, &hvm->g2m_ioport_list);
}
if ( !ret )
ret = ioports_permit_access(d, fmp, fmp + np - 1);
printk(XENLOG_G_INFO
"ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
d->domain_id, fgp, fmp, np);
- list_for_each_entry(g2m_ioport, &hvm_domain->g2m_ioport_list, list)
+ list_for_each_entry(g2m_ioport, &hvm->g2m_ioport_list, list)
if ( g2m_ioport->mport == fmp )
{
list_del(&g2m_ioport->list);
if ( hvm_copy_to_guest_phys(gaddr, NULL, HVM_VM86_TSS_SIZE, v) !=
HVMTRANS_okay )
printk("Unable to zero VM86 TSS area\n");
- d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED] =
+ d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] =
VM86_TSS_UPDATED | ((uint64_t)HVM_VM86_TSS_SIZE << 32) | gaddr;
if ( pvh_add_mem_range(d, gaddr, gaddr + HVM_VM86_TSS_SIZE,
E820_RESERVED) )
write_32bit_pse_identmap(ident_pt);
unmap_domain_page(ident_pt);
put_page(mfn_to_page(mfn));
- d->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] = gaddr;
+ d->arch.hvm.params[HVM_PARAM_IDENT_PT] = gaddr;
if ( pvh_add_mem_range(d, gaddr, gaddr + PAGE_SIZE, E820_RESERVED) )
printk("Unable to set identity page tables as reserved in the memory map\n");
v->arch.hvm_vcpu.cache_tsc_offset =
d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
- d->arch.hvm_domain.sync_tsc);
+ d->arch.hvm.sync_tsc);
paging_update_paging_modes(v);
#include <xen/event.h>
#include <xen/trace.h>
-#define domain_vhpet(x) (&(x)->arch.hvm_domain.pl_time->vhpet)
+#define domain_vhpet(x) (&(x)->arch.hvm.pl_time->vhpet)
#define vcpu_vhpet(x) (domain_vhpet((x)->domain))
#define vhpet_domain(x) (container_of(x, struct pl_time, vhpet)->domain)
#define vhpet_vcpu(x) (pt_global_vcpu_target(vhpet_domain(x)))
unsigned long result;
uint64_t val;
- if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] )
+ if ( !v->domain->arch.hvm.params[HVM_PARAM_HPET_ENABLED] )
{
result = ~0ul;
goto out;
#define set_start_timer(n) (__set_bit((n), &start_timers))
#define set_restart_timer(n) (set_stop_timer(n),set_start_timer(n))
- if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] )
+ if ( !v->domain->arch.hvm.params[HVM_PARAM_HPET_ENABLED] )
goto out;
addr &= HPET_MMAP_SIZE-1;
hpet_set(domain_vhpet(d));
register_mmio_handler(d, &hpet_mmio_ops);
- d->arch.hvm_domain.params[HVM_PARAM_HPET_ENABLED] = 1;
+ d->arch.hvm.params[HVM_PARAM_HPET_ENABLED] = 1;
}
void hpet_deinit(struct domain *d)
u64 hvm_scale_tsc(const struct domain *d, u64 tsc)
{
- u64 ratio = d->arch.hvm_domain.tsc_scaling_ratio;
+ u64 ratio = d->arch.hvm.tsc_scaling_ratio;
u64 dummy;
if ( ratio == hvm_default_tsc_scaling_ratio )
return -EINVAL;
}
- spin_lock_init(&d->arch.hvm_domain.irq_lock);
- spin_lock_init(&d->arch.hvm_domain.uc_lock);
- spin_lock_init(&d->arch.hvm_domain.write_map.lock);
- rwlock_init(&d->arch.hvm_domain.mmcfg_lock);
- INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
- INIT_LIST_HEAD(&d->arch.hvm_domain.g2m_ioport_list);
- INIT_LIST_HEAD(&d->arch.hvm_domain.mmcfg_regions);
- INIT_LIST_HEAD(&d->arch.hvm_domain.msix_tables);
+ spin_lock_init(&d->arch.hvm.irq_lock);
+ spin_lock_init(&d->arch.hvm.uc_lock);
+ spin_lock_init(&d->arch.hvm.write_map.lock);
+ rwlock_init(&d->arch.hvm.mmcfg_lock);
+ INIT_LIST_HEAD(&d->arch.hvm.write_map.list);
+ INIT_LIST_HEAD(&d->arch.hvm.g2m_ioport_list);
+ INIT_LIST_HEAD(&d->arch.hvm.mmcfg_regions);
+ INIT_LIST_HEAD(&d->arch.hvm.msix_tables);
rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, NULL, NULL);
if ( rc )
goto fail0;
nr_gsis = is_hardware_domain(d) ? nr_irqs_gsi : NR_HVM_DOMU_IRQS;
- d->arch.hvm_domain.pl_time = xzalloc(struct pl_time);
- d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
- d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler,
- NR_IO_HANDLERS);
- d->arch.hvm_domain.irq = xzalloc_bytes(hvm_irq_size(nr_gsis));
+ d->arch.hvm.pl_time = xzalloc(struct pl_time);
+ d->arch.hvm.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
+ d->arch.hvm.io_handler = xzalloc_array(struct hvm_io_handler,
+ NR_IO_HANDLERS);
+ d->arch.hvm.irq = xzalloc_bytes(hvm_irq_size(nr_gsis));
rc = -ENOMEM;
- if ( !d->arch.hvm_domain.pl_time || !d->arch.hvm_domain.irq ||
- !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
+ if ( !d->arch.hvm.pl_time || !d->arch.hvm.irq ||
+ !d->arch.hvm.params || !d->arch.hvm.io_handler )
goto fail1;
/* Set the number of GSIs */
ASSERT(hvm_domain_irq(d)->nr_gsis >= NR_ISAIRQS);
/* need link to containing domain */
- d->arch.hvm_domain.pl_time->domain = d;
+ d->arch.hvm.pl_time->domain = d;
/* Set the default IO Bitmap. */
if ( is_hardware_domain(d) )
{
- d->arch.hvm_domain.io_bitmap = _xmalloc(HVM_IOBITMAP_SIZE, PAGE_SIZE);
- if ( d->arch.hvm_domain.io_bitmap == NULL )
+ d->arch.hvm.io_bitmap = _xmalloc(HVM_IOBITMAP_SIZE, PAGE_SIZE);
+ if ( d->arch.hvm.io_bitmap == NULL )
{
rc = -ENOMEM;
goto fail1;
}
- memset(d->arch.hvm_domain.io_bitmap, ~0, HVM_IOBITMAP_SIZE);
+ memset(d->arch.hvm.io_bitmap, ~0, HVM_IOBITMAP_SIZE);
}
else
- d->arch.hvm_domain.io_bitmap = hvm_io_bitmap;
+ d->arch.hvm.io_bitmap = hvm_io_bitmap;
register_g2m_portio_handler(d);
register_vpci_portio_handler(d);
hvm_init_guest_time(d);
- d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
+ d->arch.hvm.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
vpic_init(d);
register_portio_handler(d, 0xe9, 1, hvm_print_line);
if ( hvm_tsc_scaling_supported )
- d->arch.hvm_domain.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
+ d->arch.hvm.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
rc = hvm_funcs.domain_initialise(d);
if ( rc != 0 )
vioapic_deinit(d);
fail1:
if ( is_hardware_domain(d) )
- xfree(d->arch.hvm_domain.io_bitmap);
- xfree(d->arch.hvm_domain.io_handler);
- xfree(d->arch.hvm_domain.params);
- xfree(d->arch.hvm_domain.pl_time);
- xfree(d->arch.hvm_domain.irq);
+ xfree(d->arch.hvm.io_bitmap);
+ xfree(d->arch.hvm.io_handler);
+ xfree(d->arch.hvm.params);
+ xfree(d->arch.hvm.pl_time);
+ xfree(d->arch.hvm.irq);
fail0:
hvm_destroy_cacheattr_region_list(d);
destroy_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0);
struct list_head *ioport_list, *tmp;
struct g2m_ioport *ioport;
- xfree(d->arch.hvm_domain.io_handler);
- d->arch.hvm_domain.io_handler = NULL;
-
- xfree(d->arch.hvm_domain.params);
- d->arch.hvm_domain.params = NULL;
+ XFREE(d->arch.hvm.io_handler);
+ XFREE(d->arch.hvm.params);
hvm_destroy_cacheattr_region_list(d);
stdvga_deinit(d);
vioapic_deinit(d);
- xfree(d->arch.hvm_domain.pl_time);
- d->arch.hvm_domain.pl_time = NULL;
-
- xfree(d->arch.hvm_domain.irq);
- d->arch.hvm_domain.irq = NULL;
+ XFREE(d->arch.hvm.pl_time);
+ XFREE(d->arch.hvm.irq);
- list_for_each_safe ( ioport_list, tmp,
- &d->arch.hvm_domain.g2m_ioport_list )
+ list_for_each_safe ( ioport_list, tmp, &d->arch.hvm.g2m_ioport_list )
{
ioport = list_entry(ioport_list, struct g2m_ioport, list);
list_del(&ioport->list);
/* Architecture-specific vmcs/vmcb bits */
hvm_funcs.save_cpu_ctxt(v, &ctxt);
- ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm_domain.sync_tsc);
+ ctxt.tsc = hvm_get_guest_tsc_fixed(v, d->arch.hvm.sync_tsc);
ctxt.msr_tsc_aux = hvm_msr_tsc_aux(v);
v->arch.hvm_vcpu.msr_tsc_aux = ctxt.msr_tsc_aux;
- hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm_domain.sync_tsc);
+ hvm_set_guest_tsc_fixed(v, ctxt.tsc, d->arch.hvm.sync_tsc);
seg.limit = ctxt.idtr_limit;
seg.base = ctxt.idtr_base;
{
struct vcpu *v = current;
struct domain *d = v->domain;
- u8 reason = d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON];
+ u8 reason = d->arch.hvm.params[HVM_PARAM_TRIPLE_FAULT_REASON];
gprintk(XENLOG_INFO,
"Triple fault - invoking HVM shutdown action %d\n",
static void hvm_set_uc_mode(struct vcpu *v, bool_t is_in_uc_mode)
{
- v->domain->arch.hvm_domain.is_in_uc_mode = is_in_uc_mode;
+ v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
shadow_blow_tables_per_domain(v->domain);
}
if ( value & X86_CR0_CD )
{
/* Entering no fill cache mode. */
- spin_lock(&v->domain->arch.hvm_domain.uc_lock);
+ spin_lock(&v->domain->arch.hvm.uc_lock);
v->arch.hvm_vcpu.cache_mode = NO_FILL_CACHE_MODE;
- if ( !v->domain->arch.hvm_domain.is_in_uc_mode )
+ if ( !v->domain->arch.hvm.is_in_uc_mode )
{
domain_pause_nosync(v->domain);
domain_unpause(v->domain);
}
- spin_unlock(&v->domain->arch.hvm_domain.uc_lock);
+ spin_unlock(&v->domain->arch.hvm.uc_lock);
}
else if ( !(value & X86_CR0_CD) &&
(v->arch.hvm_vcpu.cache_mode == NO_FILL_CACHE_MODE) )
{
/* Exit from no fill cache mode. */
- spin_lock(&v->domain->arch.hvm_domain.uc_lock);
+ spin_lock(&v->domain->arch.hvm.uc_lock);
v->arch.hvm_vcpu.cache_mode = NORMAL_CACHE_MODE;
if ( domain_exit_uc_mode(v) )
hvm_set_uc_mode(v, 0);
- spin_unlock(&v->domain->arch.hvm_domain.uc_lock);
+ spin_unlock(&v->domain->arch.hvm.uc_lock);
}
}
return NULL;
}
track->page = page;
- spin_lock(&d->arch.hvm_domain.write_map.lock);
- list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);
- spin_unlock(&d->arch.hvm_domain.write_map.lock);
+ spin_lock(&d->arch.hvm.write_map.lock);
+ list_add_tail(&track->list, &d->arch.hvm.write_map.list);
+ spin_unlock(&d->arch.hvm.write_map.lock);
}
map = __map_domain_page_global(page);
struct hvm_write_map *track;
unmap_domain_page_global(p);
- spin_lock(&d->arch.hvm_domain.write_map.lock);
- list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+ spin_lock(&d->arch.hvm.write_map.lock);
+ list_for_each_entry(track, &d->arch.hvm.write_map.list, list)
if ( track->page == page )
{
paging_mark_dirty(d, mfn);
xfree(track);
break;
}
- spin_unlock(&d->arch.hvm_domain.write_map.lock);
+ spin_unlock(&d->arch.hvm.write_map.lock);
}
put_page(page);
{
struct hvm_write_map *track;
- spin_lock(&d->arch.hvm_domain.write_map.lock);
- list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+ spin_lock(&d->arch.hvm.write_map.lock);
+ list_for_each_entry(track, &d->arch.hvm.write_map.list, list)
paging_mark_dirty(d, page_to_mfn(track->page));
- spin_unlock(&d->arch.hvm_domain.write_map.lock);
+ spin_unlock(&d->arch.hvm.write_map.lock);
}
static void *hvm_map_entry(unsigned long va, bool_t *writable)
v->arch.hvm_vcpu.cache_tsc_offset =
v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
hvm_set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
- d->arch.hvm_domain.sync_tsc);
+ d->arch.hvm.sync_tsc);
v->arch.hvm_vcpu.msr_tsc_adjust = 0;
domain_lock(d);
if ( d->is_dying || (d->vcpu == NULL) || (d->vcpu[0] == NULL) ||
- test_and_set_bool(d->arch.hvm_domain.is_s3_suspended) )
+ test_and_set_bool(d->arch.hvm.is_s3_suspended) )
{
domain_unlock(d);
domain_unpause(d);
static void hvm_s3_resume(struct domain *d)
{
- if ( test_and_clear_bool(d->arch.hvm_domain.is_s3_suspended) )
+ if ( test_and_clear_bool(d->arch.hvm.is_s3_suspended) )
{
struct vcpu *v;
static int hvm_allow_set_param(struct domain *d,
const struct xen_hvm_param *a)
{
- uint64_t value = d->arch.hvm_domain.params[a->index];
+ uint64_t value = d->arch.hvm.params[a->index];
int rc;
rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param);
*/
if ( !paging_mode_hap(d) || !cpu_has_vmx )
{
- d->arch.hvm_domain.params[a.index] = a.value;
+ d->arch.hvm.params[a.index] = a.value;
break;
}
rc = 0;
domain_pause(d);
- d->arch.hvm_domain.params[a.index] = a.value;
+ d->arch.hvm.params[a.index] = a.value;
for_each_vcpu ( d, v )
paging_update_cr3(v, false);
domain_unpause(d);
if ( !paging_mode_hap(d) && a.value )
rc = -EINVAL;
if ( a.value &&
- d->arch.hvm_domain.params[HVM_PARAM_ALTP2M] )
+ d->arch.hvm.params[HVM_PARAM_ALTP2M] )
rc = -EINVAL;
/* Set up NHVM state for any vcpus that are already up. */
if ( a.value &&
- !d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+ !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
for_each_vcpu(d, v)
if ( rc == 0 )
rc = nestedhvm_vcpu_initialise(v);
if ( a.value > XEN_ALTP2M_limited )
rc = -EINVAL;
if ( a.value &&
- d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM] )
+ d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
rc = -EINVAL;
break;
case HVM_PARAM_BUFIOREQ_EVTCHN:
rc = -EINVAL;
break;
case HVM_PARAM_IOREQ_SERVER_PFN:
- d->arch.hvm_domain.ioreq_gfn.base = a.value;
+ d->arch.hvm.ioreq_gfn.base = a.value;
break;
case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
{
unsigned int i;
if ( a.value == 0 ||
- a.value > sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8 )
+ a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
{
rc = -EINVAL;
break;
}
for ( i = 0; i < a.value; i++ )
- set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
+ set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
break;
}
if ( rc != 0 )
goto out;
- d->arch.hvm_domain.params[a.index] = a.value;
+ d->arch.hvm.params[a.index] = a.value;
HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64,
a.index, a.value);
switch ( a.index )
{
case HVM_PARAM_ACPI_S_STATE:
- a.value = d->arch.hvm_domain.is_s3_suspended ? 3 : 0;
+ a.value = d->arch.hvm.is_s3_suspended ? 3 : 0;
break;
case HVM_PARAM_VM86_TSS:
- a.value = (uint32_t)d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED];
+ a.value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
break;
case HVM_PARAM_VM86_TSS_SIZED:
- a.value = d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED] &
+ a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
~VM86_TSS_UPDATED;
break;
/*FALLTHRU*/
default:
- a.value = d->arch.hvm_domain.params[a.index];
+ a.value = d->arch.hvm.params[a.index];
break;
}
goto out;
}
- mode = d->arch.hvm_domain.params[HVM_PARAM_ALTP2M];
+ mode = d->arch.hvm.params[HVM_PARAM_ALTP2M];
if ( XEN_ALTP2M_disabled == mode )
{
rc = compat_memory_op(cmd, arg);
if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
- curr->domain->arch.hvm_domain.qemu_mapcache_invalidate = true;
+ curr->domain->arch.hvm.qemu_mapcache_invalidate = true;
return rc;
}
if ( curr->hcall_preempted )
return HVM_HCALL_preempted;
- if ( unlikely(currd->arch.hvm_domain.qemu_mapcache_invalidate) &&
- test_and_clear_bool(currd->arch.hvm_domain.qemu_mapcache_invalidate) )
+ if ( unlikely(currd->arch.hvm.qemu_mapcache_invalidate) &&
+ test_and_clear_bool(currd->arch.hvm.qemu_mapcache_invalidate) )
send_invalidate_req();
return HVM_HCALL_completed;
BUG_ON((p->type != IOREQ_TYPE_PIO) &&
(p->type != IOREQ_TYPE_COPY));
- for ( i = 0; i < curr_d->arch.hvm_domain.io_handler_count; i++ )
+ for ( i = 0; i < curr_d->arch.hvm.io_handler_count; i++ )
{
const struct hvm_io_handler *handler =
- &curr_d->arch.hvm_domain.io_handler[i];
+ &curr_d->arch.hvm.io_handler[i];
const struct hvm_io_ops *ops = handler->ops;
if ( handler->type != p->type )
struct hvm_io_handler *hvm_next_io_handler(struct domain *d)
{
- unsigned int i = d->arch.hvm_domain.io_handler_count++;
+ unsigned int i = d->arch.hvm.io_handler_count++;
- ASSERT(d->arch.hvm_domain.io_handler);
+ ASSERT(d->arch.hvm.io_handler);
if ( i == NR_IO_HANDLERS )
{
return NULL;
}
- return &d->arch.hvm_domain.io_handler[i];
+ return &d->arch.hvm.io_handler[i];
}
void register_mmio_handler(struct domain *d,
{
unsigned int i;
- for ( i = 0; i < d->arch.hvm_domain.io_handler_count; i++ )
+ for ( i = 0; i < d->arch.hvm.io_handler_count; i++ )
{
struct hvm_io_handler *handler =
- &d->arch.hvm_domain.io_handler[i];
+ &d->arch.hvm.io_handler[i];
if ( handler->type != IOREQ_TYPE_PIO )
continue;
const ioreq_t *p)
{
struct vcpu *curr = current;
- const struct hvm_domain *hvm_domain = &curr->domain->arch.hvm_domain;
+ const struct hvm_domain *hvm = &curr->domain->arch.hvm;
struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
struct g2m_ioport *g2m_ioport;
unsigned int start, end;
- list_for_each_entry( g2m_ioport, &hvm_domain->g2m_ioport_list, list )
+ list_for_each_entry( g2m_ioport, &hvm->g2m_ioport_list, list )
{
start = g2m_ioport->gport;
end = start + g2m_ioport->np;
if ( addr == 0xcf8 )
{
ASSERT(size == 4);
- *data = d->arch.hvm_domain.pci_cf8;
+ *data = d->arch.hvm.pci_cf8;
return X86EMUL_OKAY;
}
ASSERT((addr & ~3) == 0xcfc);
- cf8 = ACCESS_ONCE(d->arch.hvm_domain.pci_cf8);
+ cf8 = ACCESS_ONCE(d->arch.hvm.pci_cf8);
if ( !CF8_ENABLED(cf8) )
return X86EMUL_UNHANDLEABLE;
if ( addr == 0xcf8 )
{
ASSERT(size == 4);
- d->arch.hvm_domain.pci_cf8 = data;
+ d->arch.hvm.pci_cf8 = data;
return X86EMUL_OKAY;
}
ASSERT((addr & ~3) == 0xcfc);
- cf8 = ACCESS_ONCE(d->arch.hvm_domain.pci_cf8);
+ cf8 = ACCESS_ONCE(d->arch.hvm.pci_cf8);
if ( !CF8_ENABLED(cf8) )
return X86EMUL_UNHANDLEABLE;
{
const struct hvm_mmcfg *mmcfg;
- list_for_each_entry ( mmcfg, &d->arch.hvm_domain.mmcfg_regions, next )
+ list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
if ( addr >= mmcfg->addr && addr < mmcfg->addr + mmcfg->size )
return mmcfg;
struct domain *d = v->domain;
bool found;
- read_lock(&d->arch.hvm_domain.mmcfg_lock);
+ read_lock(&d->arch.hvm.mmcfg_lock);
found = vpci_mmcfg_find(d, addr);
- read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+ read_unlock(&d->arch.hvm.mmcfg_lock);
return found;
}
*data = ~0ul;
- read_lock(&d->arch.hvm_domain.mmcfg_lock);
+ read_lock(&d->arch.hvm.mmcfg_lock);
mmcfg = vpci_mmcfg_find(d, addr);
if ( !mmcfg )
{
- read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+ read_unlock(&d->arch.hvm.mmcfg_lock);
return X86EMUL_RETRY;
}
reg = vpci_mmcfg_decode_addr(mmcfg, addr, &sbdf);
- read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+ read_unlock(&d->arch.hvm.mmcfg_lock);
if ( !vpci_access_allowed(reg, len) ||
(reg + len) > PCI_CFG_SPACE_EXP_SIZE )
unsigned int reg;
pci_sbdf_t sbdf;
- read_lock(&d->arch.hvm_domain.mmcfg_lock);
+ read_lock(&d->arch.hvm.mmcfg_lock);
mmcfg = vpci_mmcfg_find(d, addr);
if ( !mmcfg )
{
- read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+ read_unlock(&d->arch.hvm.mmcfg_lock);
return X86EMUL_RETRY;
}
reg = vpci_mmcfg_decode_addr(mmcfg, addr, &sbdf);
- read_unlock(&d->arch.hvm_domain.mmcfg_lock);
+ read_unlock(&d->arch.hvm.mmcfg_lock);
if ( !vpci_access_allowed(reg, len) ||
(reg + len) > PCI_CFG_SPACE_EXP_SIZE )
new->segment = seg;
new->size = (end_bus - start_bus + 1) << 20;
- write_lock(&d->arch.hvm_domain.mmcfg_lock);
- list_for_each_entry ( mmcfg, &d->arch.hvm_domain.mmcfg_regions, next )
+ write_lock(&d->arch.hvm.mmcfg_lock);
+ list_for_each_entry ( mmcfg, &d->arch.hvm.mmcfg_regions, next )
if ( new->addr < mmcfg->addr + mmcfg->size &&
mmcfg->addr < new->addr + new->size )
{
new->segment == mmcfg->segment &&
new->size == mmcfg->size )
ret = 0;
- write_unlock(&d->arch.hvm_domain.mmcfg_lock);
+ write_unlock(&d->arch.hvm.mmcfg_lock);
xfree(new);
return ret;
}
- if ( list_empty(&d->arch.hvm_domain.mmcfg_regions) )
+ if ( list_empty(&d->arch.hvm.mmcfg_regions) )
register_mmio_handler(d, &vpci_mmcfg_ops);
- list_add(&new->next, &d->arch.hvm_domain.mmcfg_regions);
- write_unlock(&d->arch.hvm_domain.mmcfg_lock);
+ list_add(&new->next, &d->arch.hvm.mmcfg_regions);
+ write_unlock(&d->arch.hvm.mmcfg_lock);
return 0;
}
void destroy_vpci_mmcfg(struct domain *d)
{
- struct list_head *mmcfg_regions = &d->arch.hvm_domain.mmcfg_regions;
+ struct list_head *mmcfg_regions = &d->arch.hvm.mmcfg_regions;
- write_lock(&d->arch.hvm_domain.mmcfg_lock);
+ write_lock(&d->arch.hvm.mmcfg_lock);
while ( !list_empty(mmcfg_regions) )
{
struct hvm_mmcfg *mmcfg = list_first_entry(mmcfg_regions,
list_del(&mmcfg->next);
xfree(mmcfg);
}
- write_unlock(&d->arch.hvm_domain.mmcfg_lock);
+ write_unlock(&d->arch.hvm.mmcfg_lock);
}
/*
struct hvm_ioreq_server *s)
{
ASSERT(id < MAX_NR_IOREQ_SERVERS);
- ASSERT(!s || !d->arch.hvm_domain.ioreq_server.server[id]);
+ ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);
- d->arch.hvm_domain.ioreq_server.server[id] = s;
+ d->arch.hvm.ioreq_server.server[id] = s;
}
#define GET_IOREQ_SERVER(d, id) \
- (d)->arch.hvm_domain.ioreq_server.server[id]
+ (d)->arch.hvm.ioreq_server.server[id]
static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
unsigned int id)
ASSERT(!IS_DEFAULT(s));
- for ( i = 0; i < sizeof(d->arch.hvm_domain.ioreq_gfn.mask) * 8; i++ )
+ for ( i = 0; i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8; i++ )
{
- if ( test_and_clear_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask) )
- return _gfn(d->arch.hvm_domain.ioreq_gfn.base + i);
+ if ( test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.mask) )
+ return _gfn(d->arch.hvm.ioreq_gfn.base + i);
}
return INVALID_GFN;
static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
{
struct domain *d = s->target;
- unsigned int i = gfn_x(gfn) - d->arch.hvm_domain.ioreq_gfn.base;
+ unsigned int i = gfn_x(gfn) - d->arch.hvm.ioreq_gfn.base;
ASSERT(!IS_DEFAULT(s));
ASSERT(!gfn_eq(gfn, INVALID_GFN));
- set_bit(i, &d->arch.hvm_domain.ioreq_gfn.mask);
+ set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
}
static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
if ( IS_DEFAULT(s) )
iorp->gfn = _gfn(buf ?
- d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_PFN] :
- d->arch.hvm_domain.params[HVM_PARAM_IOREQ_PFN]);
+ d->arch.hvm.params[HVM_PARAM_BUFIOREQ_PFN] :
+ d->arch.hvm.params[HVM_PARAM_IOREQ_PFN]);
else
iorp->gfn = hvm_alloc_ioreq_gfn(s);
unsigned int id;
bool found = false;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
}
}
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return found;
}
s->bufioreq_evtchn = rc;
if ( IS_DEFAULT(s) )
- d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
+ d->arch.hvm.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
s->bufioreq_evtchn;
}
return -ENOMEM;
domain_pause(d);
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
if ( is_default )
{
if ( id )
*id = i;
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
domain_unpause(d);
return 0;
fail:
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
domain_unpause(d);
xfree(s);
if ( id == DEFAULT_IOSERVID )
return -EPERM;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
}
if ( id == DEFAULT_IOSERVID )
return -EOPNOTSUPP;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
}
if ( !is_hvm_domain(d) )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
}
out:
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
}
if ( id == DEFAULT_IOSERVID )
return -EOPNOTSUPP;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = rangeset_add_range(r, start, end);
out:
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
}
if ( id == DEFAULT_IOSERVID )
return -EOPNOTSUPP;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = rangeset_remove_range(r, start, end);
out:
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
}
if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
return -EINVAL;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = p2m_set_ioreq_server(d, flags, s);
out:
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
if ( rc == 0 && flags == 0 )
{
if ( id == DEFAULT_IOSERVID )
return -EOPNOTSUPP;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
s = get_ioreq_server(d, id);
rc = 0;
out:
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
}
unsigned int id;
int rc;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
{
goto fail;
}
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return 0;
hvm_ioreq_server_remove_vcpu(s, v);
}
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
return rc;
}
struct hvm_ioreq_server *s;
unsigned int id;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
FOR_EACH_IOREQ_SERVER(d, id, s)
hvm_ioreq_server_remove_vcpu(s, v);
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
}
void hvm_destroy_all_ioreq_servers(struct domain *d)
struct hvm_ioreq_server *s;
unsigned int id;
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
/* No need to domain_pause() as the domain is being torn down */
xfree(s);
}
- spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
}
struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
return GET_IOREQ_SERVER(d, DEFAULT_IOSERVID);
- cf8 = d->arch.hvm_domain.pci_cf8;
+ cf8 = d->arch.hvm.pci_cf8;
if ( p->type == IOREQ_TYPE_PIO &&
(p->addr & ~3) == 0xcfc &&
struct domain *d = current->domain;
if ( dir == IOREQ_WRITE && bytes == 4 )
- d->arch.hvm_domain.pci_cf8 = *val;
+ d->arch.hvm.pci_cf8 = *val;
/* We always need to fall through to the catch all emulator */
return X86EMUL_UNHANDLEABLE;
void hvm_ioreq_init(struct domain *d)
{
- spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
+ spin_lock_init(&d->arch.hvm.ioreq_server.lock);
register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
}
return -1;
}
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
if ( !level || hvm_irq->gsi_assert_count[gsi]++ == 0 )
assert_gsi(d, gsi);
vector = vioapic_get_vector(d, gsi);
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
return vector;
}
return;
}
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
hvm_irq->gsi_assert_count[gsi]--;
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
}
static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
void hvm_pci_intx_assert(
struct domain *d, unsigned int device, unsigned int intx)
{
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
__hvm_pci_intx_assert(d, device, intx);
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
}
static void __hvm_pci_intx_deassert(
void hvm_pci_intx_deassert(
struct domain *d, unsigned int device, unsigned int intx)
{
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
__hvm_pci_intx_deassert(d, device, intx);
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
}
void hvm_gsi_assert(struct domain *d, unsigned int gsi)
* for the hardware domain, Xen needs to rely on gsi_assert_count in order
* to know if the GSI is pending or not.
*/
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
if ( !hvm_irq->gsi_assert_count[gsi] )
{
hvm_irq->gsi_assert_count[gsi] = 1;
assert_gsi(d, gsi);
}
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
}
void hvm_gsi_deassert(struct domain *d, unsigned int gsi)
return;
}
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
hvm_irq->gsi_assert_count[gsi] = 0;
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
}
int hvm_isa_irq_assert(struct domain *d, unsigned int isa_irq,
ASSERT(isa_irq <= 15);
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
if ( !__test_and_set_bit(isa_irq, &hvm_irq->isa_irq.i) &&
(hvm_irq->gsi_assert_count[gsi]++ == 0) )
if ( get_vector )
vector = get_vector(d, gsi);
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
return vector;
}
ASSERT(isa_irq <= 15);
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
if ( __test_and_clear_bit(isa_irq, &hvm_irq->isa_irq.i) &&
(--hvm_irq->gsi_assert_count[gsi] == 0) )
deassert_irq(d, isa_irq);
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
}
static void hvm_set_callback_irq_level(struct vcpu *v)
ASSERT(v->vcpu_id == 0);
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
/* NB. Do not check the evtchn_upcall_mask. It is not used in HVM mode. */
asserted = !!vcpu_info(v, evtchn_upcall_pending);
}
out:
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
}
void hvm_maybe_deassert_evtchn_irq(void)
if ( (link > 3) || (isa_irq > 15) )
return -EINVAL;
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
old_isa_irq = hvm_irq->pci_link.route[link];
if ( old_isa_irq == isa_irq )
}
out:
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
d->domain_id, link, old_isa_irq, isa_irq);
(!has_vlapic(d) || !has_vioapic(d) || !has_vpic(d)) )
return;
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
/* Tear down old callback via. */
if ( hvm_irq->callback_via_asserted )
break;
}
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
for_each_vcpu ( d, v )
if ( is_vcpu_online(v) )
struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
{
- struct hvm_domain *plat = &v->domain->arch.hvm_domain;
+ struct hvm_domain *plat = &v->domain->arch.hvm;
int vector;
if ( unlikely(v->nmi_pending) )
unsigned int asserted, pdev, pintx;
int rc;
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
pdev = hvm_irq->callback_via.pci.dev;
pintx = hvm_irq->callback_via.pci.intx;
if ( asserted )
__hvm_pci_intx_assert(d, pdev, pintx);
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
return rc;
}
void hvm_init_cacheattr_region_list(struct domain *d)
{
- INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
+ INIT_LIST_HEAD(&d->arch.hvm.pinned_cacheattr_ranges);
}
void hvm_destroy_cacheattr_region_list(struct domain *d)
{
- struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
+ struct list_head *head = &d->arch.hvm.pinned_cacheattr_ranges;
struct hvm_mem_pinned_cacheattr_range *range;
while ( !list_empty(head) )
rcu_read_lock(&pinned_cacheattr_rcu_lock);
list_for_each_entry_rcu ( range,
- &d->arch.hvm_domain.pinned_cacheattr_ranges,
+ &d->arch.hvm.pinned_cacheattr_ranges,
list )
{
if ( ((gfn_x(gfn) & mask) >= range->start) &&
/* Remove the requested range. */
rcu_read_lock(&pinned_cacheattr_rcu_lock);
list_for_each_entry_rcu ( range,
- &d->arch.hvm_domain.pinned_cacheattr_ranges,
+ &d->arch.hvm.pinned_cacheattr_ranges,
list )
if ( range->start == gfn_start && range->end == gfn_end )
{
rcu_read_lock(&pinned_cacheattr_rcu_lock);
list_for_each_entry_rcu ( range,
- &d->arch.hvm_domain.pinned_cacheattr_ranges,
+ &d->arch.hvm.pinned_cacheattr_ranges,
list )
{
if ( range->start == gfn_start && range->end == gfn_end )
range->end = gfn_end;
range->type = type;
- list_add_rcu(&range->list, &d->arch.hvm_domain.pinned_cacheattr_ranges);
+ list_add_rcu(&range->list, &d->arch.hvm.pinned_cacheattr_ranges);
p2m_memory_type_changed(d);
if ( type != PAT_TYPE_WRBACK )
flush_all(FLUSH_CACHE);
if ( direct_mmio )
{
- if ( (mfn_x(mfn) ^ d->arch.hvm_domain.vmx.apic_access_mfn) >> order )
+ if ( (mfn_x(mfn) ^ d->arch.hvm.vmx.apic_access_mfn) >> order )
return MTRR_TYPE_UNCACHABLE;
if ( order )
return -1;
/* Dispatch SCIs based on the PM1a_STS and PM1a_EN registers */
static void pmt_update_sci(PMTState *s)
{
- struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
+ struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm.acpi;
ASSERT(spin_is_locked(&s->lock));
void hvm_acpi_power_button(struct domain *d)
{
- PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+ PMTState *s = &d->arch.hvm.pl_time->vpmt;
if ( !has_vpm(d) )
return;
spin_lock(&s->lock);
- d->arch.hvm_domain.acpi.pm1a_sts |= PWRBTN_STS;
+ d->arch.hvm.acpi.pm1a_sts |= PWRBTN_STS;
pmt_update_sci(s);
spin_unlock(&s->lock);
}
void hvm_acpi_sleep_button(struct domain *d)
{
- PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+ PMTState *s = &d->arch.hvm.pl_time->vpmt;
if ( !has_vpm(d) )
return;
spin_lock(&s->lock);
- d->arch.hvm_domain.acpi.pm1a_sts |= PWRBTN_STS;
+ d->arch.hvm.acpi.pm1a_sts |= PWRBTN_STS;
pmt_update_sci(s);
spin_unlock(&s->lock);
}
static void pmt_update_time(PMTState *s)
{
uint64_t curr_gtime, tmp;
- struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm_domain.acpi;
+ struct hvm_hw_acpi *acpi = &s->vcpu->domain->arch.hvm.acpi;
uint32_t tmr_val = acpi->tmr_val, msb = tmr_val & TMR_VAL_MSB;
ASSERT(spin_is_locked(&s->lock));
/* How close are we to the next MSB flip? */
pmt_cycles_until_flip = TMR_VAL_MSB -
- (s->vcpu->domain->arch.hvm_domain.acpi.tmr_val & (TMR_VAL_MSB - 1));
+ (s->vcpu->domain->arch.hvm.acpi.tmr_val & (TMR_VAL_MSB - 1));
/* Overall time between MSB flips */
time_until_flip = (1000000000ULL << 23) / FREQUENCE_PMTIMER;
int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
struct vcpu *v = current;
- struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
- PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
+ struct hvm_hw_acpi *acpi = &v->domain->arch.hvm.acpi;
+ PMTState *s = &v->domain->arch.hvm.pl_time->vpmt;
uint32_t addr, data, byte;
int i;
addr = port -
- ((v->domain->arch.hvm_domain.params[
+ ((v->domain->arch.hvm.params[
HVM_PARAM_ACPI_IOPORTS_LOCATION] == 0) ?
PM1a_STS_ADDR_V0 : PM1a_STS_ADDR_V1);
int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
struct vcpu *v = current;
- struct hvm_hw_acpi *acpi = &v->domain->arch.hvm_domain.acpi;
- PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
+ struct hvm_hw_acpi *acpi = &v->domain->arch.hvm.acpi;
+ PMTState *s = &v->domain->arch.hvm.pl_time->vpmt;
if ( bytes != 4 || dir != IOREQ_READ )
{
static int acpi_save(struct domain *d, hvm_domain_context_t *h)
{
- struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
- PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+ struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;
+ PMTState *s = &d->arch.hvm.pl_time->vpmt;
uint32_t x, msb = acpi->tmr_val & TMR_VAL_MSB;
int rc;
static int acpi_load(struct domain *d, hvm_domain_context_t *h)
{
- struct hvm_hw_acpi *acpi = &d->arch.hvm_domain.acpi;
- PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+ struct hvm_hw_acpi *acpi = &d->arch.hvm.acpi;
+ PMTState *s = &d->arch.hvm.pl_time->vpmt;
if ( !has_vpm(d) )
return -ENODEV;
return -ENODEV;
/* Check that version is changing. */
- old_version = d->arch.hvm_domain.params[HVM_PARAM_ACPI_IOPORTS_LOCATION];
+ old_version = d->arch.hvm.params[HVM_PARAM_ACPI_IOPORTS_LOCATION];
if ( version == old_version )
return 0;
void pmtimer_init(struct vcpu *v)
{
- PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
+ PMTState *s = &v->domain->arch.hvm.pl_time->vpmt;
if ( !has_vpm(v->domain) )
return;
void pmtimer_deinit(struct domain *d)
{
- PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
+ PMTState *s = &d->arch.hvm.pl_time->vpmt;
if ( !has_vpm(d) )
return;
return;
/* Reset the counter. */
- d->arch.hvm_domain.acpi.tmr_val = 0;
+ d->arch.hvm.acpi.tmr_val = 0;
}
/*
#define MIN_PER_HOUR 60
#define HOUR_PER_DAY 24
-#define domain_vrtc(x) (&(x)->arch.hvm_domain.pl_time->vrtc)
+#define domain_vrtc(x) (&(x)->arch.hvm.pl_time->vrtc)
#define vcpu_vrtc(x) (domain_vrtc((x)->domain))
#define vrtc_domain(x) (container_of(x, struct pl_time, vrtc)->domain)
#define vrtc_vcpu(x) (pt_global_vcpu_target(vrtc_domain(x)))
s_time_t now = NOW();
s->period = period;
- if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
+ if ( v->domain->arch.hvm.params[HVM_PARAM_VPT_ALIGN] )
delta = 0;
else
delta = period - ((now - s->start_time) % period);
hdr->gtsc_khz = d->arch.tsc_khz;
/* Time when saving started */
- d->arch.hvm_domain.sync_tsc = rdtsc();
+ d->arch.hvm.sync_tsc = rdtsc();
}
int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
hvm_set_rdtsc_exiting(d, 1);
/* Time when restore started */
- d->arch.hvm_domain.sync_tsc = rdtsc();
+ d->arch.hvm.sync_tsc = rdtsc();
/* VGA state is not saved/restored, so we nobble the cache. */
- d->arch.hvm_domain.stdvga.cache = STDVGA_CACHE_DISABLED;
+ d->arch.hvm.stdvga.cache = STDVGA_CACHE_DISABLED;
return 0;
}
static int stdvga_outb(uint64_t addr, uint8_t val)
{
- struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
int rc = 1, prev_stdvga = s->stdvga;
switch ( addr )
static int stdvga_intercept_pio(
int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
- struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
if ( dir == IOREQ_WRITE )
{
static uint8_t stdvga_mem_readb(uint64_t addr)
{
- struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
int plane;
uint32_t ret, *vram_l;
uint8_t *vram_b;
static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
{
- struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
int plane, write_mode, b, func_select, mask;
uint32_t write_mask, bit_mask, set_mask, *vram_l;
uint8_t *vram_b;
uint64_t addr, uint32_t size,
uint64_t data)
{
- struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
ioreq_t p = {
.type = IOREQ_TYPE_COPY,
.addr = addr,
static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
const ioreq_t *p)
{
- struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
/*
* The range check must be done without taking the lock, to avoid
static void stdvga_mem_complete(const struct hvm_io_handler *handler)
{
- struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
+ struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
spin_unlock(&s->lock);
}
void stdvga_init(struct domain *d)
{
- struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
+ struct hvm_hw_stdvga *s = &d->arch.hvm.stdvga;
struct page_info *pg;
unsigned int i;
void stdvga_deinit(struct domain *d)
{
- struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
+ struct hvm_hw_stdvga *s = &d->arch.hvm.stdvga;
int i;
if ( !has_vvga(d) )
static void svm_guest_osvw_init(struct domain *d)
{
- struct svm_domain *svm = &d->arch.hvm_domain.svm;
+ struct svm_domain *svm = &d->arch.hvm.svm;
spin_lock(&osvw_lock);
case MSR_AMD_OSVW_STATUS:
if ( !d->arch.cpuid->extd.osvw )
goto gpf;
- *msr_content =
- d->arch.hvm_domain.svm.osvw.raw[msr - MSR_AMD_OSVW_ID_LENGTH];
+ *msr_content = d->arch.hvm.svm.osvw.raw[msr - MSR_AMD_OSVW_ID_LENGTH];
break;
default:
svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR);
vmcb->_msrpm_base_pa = (u64)virt_to_maddr(arch_svm->msrpm);
- vmcb->_iopm_base_pa = __pa(v->domain->arch.hvm_domain.io_bitmap);
+ vmcb->_iopm_base_pa = __pa(v->domain->arch.hvm.io_bitmap);
/* Virtualise EFLAGS.IF and LAPIC TPR (CR8). */
vmcb->_vintr.fields.intr_masking = 1;
{
unsigned int i;
- for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+ for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
{
struct hvm_vioapic *vioapic = domain_vioapic(d, i);
{
unsigned int i;
- for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+ for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
{
struct hvm_vioapic *vioapic = domain_vioapic(d, i);
int unmasked = 0;
unsigned int gsi = vioapic->base_gsi + idx;
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
pent = &vioapic->redirtbl[idx];
ent = *pent;
vioapic_deliver(vioapic, idx);
}
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
if ( gsi == 0 || unmasked )
pt_may_unmask_irq(d, NULL);
struct vcpu *v;
unsigned int irq = vioapic->base_gsi + pin;
- ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
+ ASSERT(spin_is_locked(&d->arch.hvm.irq_lock));
HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
"dest=%x dest_mode=%x delivery_mode=%x "
HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "irq %x", irq);
ASSERT(pin < vioapic->nr_pins);
- ASSERT(spin_is_locked(&d->arch.hvm_domain.irq_lock));
+ ASSERT(spin_is_locked(&d->arch.hvm.irq_lock));
ent = &vioapic->redirtbl[pin];
if ( ent->fields.mask )
ASSERT(has_vioapic(d));
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
- for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+ for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
{
struct hvm_vioapic *vioapic = domain_vioapic(d, i);
unsigned int pin;
if ( iommu_enabled )
{
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
- spin_lock(&d->arch.hvm_domain.irq_lock);
+ spin_lock(&d->arch.hvm.irq_lock);
}
if ( (ent->fields.trig_mode == VIOAPIC_LEVEL_TRIG) &&
}
}
- spin_unlock(&d->arch.hvm_domain.irq_lock);
+ spin_unlock(&d->arch.hvm.irq_lock);
}
int vioapic_get_mask(const struct domain *d, unsigned int gsi)
s = domain_vioapic(d, 0);
if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
- d->arch.hvm_domain.nr_vioapics != 1 )
+ d->arch.hvm.nr_vioapics != 1 )
return -EOPNOTSUPP;
return hvm_save_entry(IOAPIC, 0, h, &s->domU);
s = domain_vioapic(d, 0);
if ( s->nr_pins != ARRAY_SIZE(s->domU.redirtbl) ||
- d->arch.hvm_domain.nr_vioapics != 1 )
+ d->arch.hvm.nr_vioapics != 1 )
return -EOPNOTSUPP;
return hvm_load_entry(IOAPIC, h, &s->domU);
if ( !has_vioapic(d) )
{
- ASSERT(!d->arch.hvm_domain.nr_vioapics);
+ ASSERT(!d->arch.hvm.nr_vioapics);
return;
}
- for ( i = 0; i < d->arch.hvm_domain.nr_vioapics; i++ )
+ for ( i = 0; i < d->arch.hvm.nr_vioapics; i++ )
{
struct hvm_vioapic *vioapic = domain_vioapic(d, i);
unsigned int nr_pins = vioapic->nr_pins, base_gsi = vioapic->base_gsi;
for ( i = 0; i < nr_vioapics; i++)
xfree(domain_vioapic(d, i));
- xfree(d->arch.hvm_domain.vioapic);
+ xfree(d->arch.hvm.vioapic);
}
int vioapic_init(struct domain *d)
if ( !has_vioapic(d) )
{
- ASSERT(!d->arch.hvm_domain.nr_vioapics);
+ ASSERT(!d->arch.hvm.nr_vioapics);
return 0;
}
nr_vioapics = is_hardware_domain(d) ? nr_ioapics : 1;
- if ( (d->arch.hvm_domain.vioapic == NULL) &&
- ((d->arch.hvm_domain.vioapic =
+ if ( (d->arch.hvm.vioapic == NULL) &&
+ ((d->arch.hvm.vioapic =
xzalloc_array(struct hvm_vioapic *, nr_vioapics)) == NULL) )
return -ENOMEM;
*/
ASSERT(hvm_domain_irq(d)->nr_gsis >= nr_gsis);
- d->arch.hvm_domain.nr_vioapics = nr_vioapics;
+ d->arch.hvm.nr_vioapics = nr_vioapics;
vioapic_reset(d);
register_mmio_handler(d, &vioapic_mmio_ops);
{
if ( !has_vioapic(d) )
{
- ASSERT(!d->arch.hvm_domain.nr_vioapics);
+ ASSERT(!d->arch.hvm.nr_vioapics);
return;
}
- vioapic_free(d, d->arch.hvm_domain.nr_vioapics);
+ vioapic_free(d, d->arch.hvm.nr_vioapics);
}
case 2:
/* Hypervisor information, but only if the guest has set its
own version number. */
- if ( d->arch.hvm_domain.viridian.guest_os_id.raw == 0 )
+ if ( d->arch.hvm.viridian.guest_os_id.raw == 0 )
break;
res->a = viridian_build;
res->b = ((uint32_t)viridian_major << 16) | viridian_minor;
case 4:
/* Recommended hypercall usage. */
- if ( (d->arch.hvm_domain.viridian.guest_os_id.raw == 0) ||
- (d->arch.hvm_domain.viridian.guest_os_id.fields.os < 4) )
+ if ( (d->arch.hvm.viridian.guest_os_id.raw == 0) ||
+ (d->arch.hvm.viridian.guest_os_id.fields.os < 4) )
break;
res->a = CPUID4A_RELAX_TIMER_INT;
if ( viridian_feature_mask(d) & HVMPV_hcall_remote_tlb_flush )
{
const union viridian_guest_os_id *goi;
- goi = &d->arch.hvm_domain.viridian.guest_os_id;
+ goi = &d->arch.hvm.viridian.guest_os_id;
printk(XENLOG_G_INFO
"d%d: VIRIDIAN GUEST_OS_ID: vendor: %x os: %x major: %x minor: %x sp: %x build: %x\n",
{
const union viridian_hypercall_gpa *hg;
- hg = &d->arch.hvm_domain.viridian.hypercall_gpa;
+ hg = &d->arch.hvm.viridian.hypercall_gpa;
printk(XENLOG_G_INFO "d%d: VIRIDIAN HYPERCALL: enabled: %x pfn: %lx\n",
d->domain_id,
{
const union viridian_reference_tsc *rt;
- rt = &d->arch.hvm_domain.viridian.reference_tsc;
+ rt = &d->arch.hvm.viridian.reference_tsc;
printk(XENLOG_G_INFO "d%d: VIRIDIAN REFERENCE_TSC: enabled: %x pfn: %lx\n",
d->domain_id,
static void enable_hypercall_page(struct domain *d)
{
- unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn;
+ unsigned long gmfn = d->arch.hvm.viridian.hypercall_gpa.fields.pfn;
struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
uint8_t *p;
static void update_reference_tsc(struct domain *d, bool_t initialize)
{
- unsigned long gmfn = d->arch.hvm_domain.viridian.reference_tsc.fields.pfn;
+ unsigned long gmfn = d->arch.hvm.viridian.reference_tsc.fields.pfn;
struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
HV_REFERENCE_TSC_PAGE *p;
{
case HV_X64_MSR_GUEST_OS_ID:
perfc_incr(mshv_wrmsr_osid);
- d->arch.hvm_domain.viridian.guest_os_id.raw = val;
+ d->arch.hvm.viridian.guest_os_id.raw = val;
dump_guest_os_id(d);
break;
case HV_X64_MSR_HYPERCALL:
perfc_incr(mshv_wrmsr_hc_page);
- d->arch.hvm_domain.viridian.hypercall_gpa.raw = val;
+ d->arch.hvm.viridian.hypercall_gpa.raw = val;
dump_hypercall(d);
- if ( d->arch.hvm_domain.viridian.hypercall_gpa.fields.enabled )
+ if ( d->arch.hvm.viridian.hypercall_gpa.fields.enabled )
enable_hypercall_page(d);
break;
return 0;
perfc_incr(mshv_wrmsr_tsc_msr);
- d->arch.hvm_domain.viridian.reference_tsc.raw = val;
+ d->arch.hvm.viridian.reference_tsc.raw = val;
dump_reference_tsc(d);
- if ( d->arch.hvm_domain.viridian.reference_tsc.fields.enabled )
+ if ( d->arch.hvm.viridian.reference_tsc.fields.enabled )
update_reference_tsc(d, 1);
break;
{
struct viridian_time_ref_count *trc;
- trc = &d->arch.hvm_domain.viridian.time_ref_count;
+ trc = &d->arch.hvm.viridian.time_ref_count;
if ( test_and_clear_bit(_TRC_running, &trc->flags) )
trc->val = raw_trc_val(d) + trc->off;
{
struct viridian_time_ref_count *trc;
- trc = &d->arch.hvm_domain.viridian.time_ref_count;
+ trc = &d->arch.hvm.viridian.time_ref_count;
if ( !d->is_shutting_down &&
!test_and_set_bit(_TRC_running, &trc->flags) )
{
case HV_X64_MSR_GUEST_OS_ID:
perfc_incr(mshv_rdmsr_osid);
- *val = d->arch.hvm_domain.viridian.guest_os_id.raw;
+ *val = d->arch.hvm.viridian.guest_os_id.raw;
break;
case HV_X64_MSR_HYPERCALL:
perfc_incr(mshv_rdmsr_hc_page);
- *val = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
+ *val = d->arch.hvm.viridian.hypercall_gpa.raw;
break;
case HV_X64_MSR_VP_INDEX:
return 0;
perfc_incr(mshv_rdmsr_tsc_msr);
- *val = d->arch.hvm_domain.viridian.reference_tsc.raw;
+ *val = d->arch.hvm.viridian.reference_tsc.raw;
break;
case HV_X64_MSR_TIME_REF_COUNT:
{
struct viridian_time_ref_count *trc;
- trc = &d->arch.hvm_domain.viridian.time_ref_count;
+ trc = &d->arch.hvm.viridian.time_ref_count;
if ( !(viridian_feature_mask(d) & HVMPV_time_ref_count) )
return 0;
static int viridian_save_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
{
struct hvm_viridian_domain_context ctxt = {
- .time_ref_count = d->arch.hvm_domain.viridian.time_ref_count.val,
- .hypercall_gpa = d->arch.hvm_domain.viridian.hypercall_gpa.raw,
- .guest_os_id = d->arch.hvm_domain.viridian.guest_os_id.raw,
- .reference_tsc = d->arch.hvm_domain.viridian.reference_tsc.raw,
+ .time_ref_count = d->arch.hvm.viridian.time_ref_count.val,
+ .hypercall_gpa = d->arch.hvm.viridian.hypercall_gpa.raw,
+ .guest_os_id = d->arch.hvm.viridian.guest_os_id.raw,
+ .reference_tsc = d->arch.hvm.viridian.reference_tsc.raw,
};
if ( !is_viridian_domain(d) )
if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
return -EINVAL;
- d->arch.hvm_domain.viridian.time_ref_count.val = ctxt.time_ref_count;
- d->arch.hvm_domain.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
- d->arch.hvm_domain.viridian.guest_os_id.raw = ctxt.guest_os_id;
- d->arch.hvm_domain.viridian.reference_tsc.raw = ctxt.reference_tsc;
+ d->arch.hvm.viridian.time_ref_count.val = ctxt.time_ref_count;
+ d->arch.hvm.viridian.hypercall_gpa.raw = ctxt.hypercall_gpa;
+ d->arch.hvm.viridian.guest_os_id.raw = ctxt.guest_os_id;
+ d->arch.hvm.viridian.reference_tsc.raw = ctxt.reference_tsc;
- if ( d->arch.hvm_domain.viridian.reference_tsc.fields.enabled )
+ if ( d->arch.hvm.viridian.reference_tsc.fields.enabled )
update_reference_tsc(d, 0);
return 0;
return 0;
TRACE_2D(TRC_HVM_EMUL_LAPIC_PIC_INTR,
- (v == v->domain->arch.hvm_domain.i8259_target),
+ (v == v->domain->arch.hvm.i8259_target),
v ? __vlapic_accept_pic_intr(v) : -1);
- return ((v == v->domain->arch.hvm_domain.i8259_target) &&
+ return ((v == v->domain->arch.hvm.i8259_target) &&
__vlapic_accept_pic_intr(v));
}
v = d->vcpu ? d->vcpu[0] : NULL;
found:
- if ( d->arch.hvm_domain.i8259_target == v )
+ if ( d->arch.hvm.i8259_target == v )
return;
- d->arch.hvm_domain.i8259_target = v;
+ d->arch.hvm.i8259_target = v;
pt_adjust_global_vcpu_target(v);
}
*/
static bool msixtbl_initialised(const struct domain *d)
{
- return !!d->arch.hvm_domain.msixtbl_list.next;
+ return d->arch.hvm.msixtbl_list.next;
}
static struct msixtbl_entry *msixtbl_find_entry(
struct msixtbl_entry *entry;
struct domain *d = v->domain;
- list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
+ list_for_each_entry( entry, &d->arch.hvm.msixtbl_list, list )
if ( addr >= entry->gtable &&
addr < entry->gtable + entry->table_len )
return entry;
entry->pdev = pdev;
entry->gtable = (unsigned long) gtable;
- list_add_rcu(&entry->list, &d->arch.hvm_domain.msixtbl_list);
+ list_add_rcu(&entry->list, &d->arch.hvm.msixtbl_list);
}
static void free_msixtbl_entry(struct rcu_head *rcu)
pdev = msi_desc->dev;
- list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
+ list_for_each_entry( entry, &d->arch.hvm.msixtbl_list, list )
if ( pdev == entry->pdev )
goto found;
pdev = msi_desc->dev;
- list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
+ list_for_each_entry( entry, &d->arch.hvm.msixtbl_list, list )
if ( pdev == entry->pdev )
goto found;
if ( !is_hvm_domain(d) || !has_vlapic(d) || msixtbl_initialised(d) )
return;
- INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
+ INIT_LIST_HEAD(&d->arch.hvm.msixtbl_list);
handler = hvm_next_io_handler(d);
if ( handler )
spin_lock(&d->event_lock);
list_for_each_entry_safe( entry, temp,
- &d->arch.hvm_domain.msixtbl_list, list )
+ &d->arch.hvm.msixtbl_list, list )
del_msixtbl_entry(entry);
spin_unlock(&d->event_lock);
}
/* I/O access bitmap. */
- __vmwrite(IO_BITMAP_A, __pa(d->arch.hvm_domain.io_bitmap));
- __vmwrite(IO_BITMAP_B, __pa(d->arch.hvm_domain.io_bitmap) + PAGE_SIZE);
+ __vmwrite(IO_BITMAP_A, __pa(d->arch.hvm.io_bitmap));
+ __vmwrite(IO_BITMAP_B, __pa(d->arch.hvm.io_bitmap) + PAGE_SIZE);
if ( cpu_has_vmx_virtual_intr_delivery )
{
__vmwrite(XSS_EXIT_BITMAP, 0);
if ( cpu_has_vmx_tsc_scaling )
- __vmwrite(TSC_MULTIPLIER, d->arch.hvm_domain.tsc_scaling_ratio);
+ __vmwrite(TSC_MULTIPLIER, d->arch.hvm.tsc_scaling_ratio);
/* will update HOST & GUEST_CR3 as reqd */
paging_update_paging_modes(v);
bool_t vmx_domain_pml_enabled(const struct domain *d)
{
- return !!(d->arch.hvm_domain.vmx.status & VMX_DOMAIN_PML_ENABLED);
+ return d->arch.hvm.vmx.status & VMX_DOMAIN_PML_ENABLED;
}
/*
if ( (rc = vmx_vcpu_enable_pml(v)) != 0 )
goto error;
- d->arch.hvm_domain.vmx.status |= VMX_DOMAIN_PML_ENABLED;
+ d->arch.hvm.vmx.status |= VMX_DOMAIN_PML_ENABLED;
return 0;
for_each_vcpu ( d, v )
vmx_vcpu_disable_pml(v);
- d->arch.hvm_domain.vmx.status &= ~VMX_DOMAIN_PML_ENABLED;
+ d->arch.hvm.vmx.status &= ~VMX_DOMAIN_PML_ENABLED;
}
/*
if ( !iommu_intpost || !is_hvm_domain(d) )
return;
- ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
+ ASSERT(!d->arch.hvm.pi_ops.vcpu_block);
/*
* We carefully handle the timing here:
* This can make sure the PI (especially the NDST feild) is
* in proper state when we call vmx_vcpu_block().
*/
- d->arch.hvm_domain.pi_ops.flags = PI_CSW_FROM | PI_CSW_TO;
+ d->arch.hvm.pi_ops.flags = PI_CSW_FROM | PI_CSW_TO;
for_each_vcpu ( d, v )
{
x2apic_enabled ? dest : MASK_INSR(dest, PI_xAPIC_NDST_MASK));
}
- d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
+ d->arch.hvm.pi_ops.vcpu_block = vmx_vcpu_block;
}
/* This function is called when pcidevs_lock is held */
if ( !iommu_intpost || !is_hvm_domain(d) )
return;
- ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
+ ASSERT(d->arch.hvm.pi_ops.vcpu_block);
/*
* Pausing the domain can make sure the vCPUs are not
domain_pause(d);
/*
- * Note that we don't set 'd->arch.hvm_domain.pi_ops.switch_to' to NULL
+ * Note that we don't set 'd->arch.hvm.pi_ops.switch_to' to NULL
* here. If we deassign the hooks while the vCPU is runnable in the
* runqueue with 'SN' set, all the future notification event will be
* suppressed since vmx_deliver_posted_intr() also use 'SN' bit
* system, leave it here until we find a clean solution to deassign the
* 'switch_to' hook function.
*/
- d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
- d->arch.hvm_domain.pi_ops.flags = PI_CSW_TO;
+ d->arch.hvm.pi_ops.vcpu_block = NULL;
+ d->arch.hvm.pi_ops.flags = PI_CSW_TO;
for_each_vcpu ( d, v )
vmx_pi_unblock_vcpu(v);
vmx_restore_host_msrs();
vmx_save_dr(v);
- if ( v->domain->arch.hvm_domain.pi_ops.flags & PI_CSW_FROM )
+ if ( v->domain->arch.hvm.pi_ops.flags & PI_CSW_FROM )
vmx_pi_switch_from(v);
}
vmx_restore_guest_msrs(v);
vmx_restore_dr(v);
- if ( v->domain->arch.hvm_domain.pi_ops.flags & PI_CSW_TO )
+ if ( v->domain->arch.hvm.pi_ops.flags & PI_CSW_TO )
vmx_pi_switch_to(v);
}
if ( seg == x86_seg_tr )
{
const struct domain *d = v->domain;
- uint64_t val = d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED];
+ uint64_t val = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
if ( val )
{
if ( val & VM86_TSS_UPDATED )
{
hvm_prepare_vm86_tss(v, base, limit);
- cmpxchg(&d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED],
+ cmpxchg(&d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED],
val, val & ~VM86_TSS_UPDATED);
}
v->arch.hvm_vmx.vm86_segment_mask &= ~(1u << seg);
{
if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
v->arch.hvm_vcpu.hw_cr[3] =
- v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT];
+ v->domain->arch.hvm.params[HVM_PARAM_IDENT_PT];
vmx_load_pdptrs(v);
}
mfn = page_to_mfn(pg);
clear_domain_page(mfn);
share_xen_page_with_guest(pg, d, SHARE_rw);
- d->arch.hvm_domain.vmx.apic_access_mfn = mfn_x(mfn);
+ d->arch.hvm.vmx.apic_access_mfn = mfn_x(mfn);
set_mmio_p2m_entry(d, paddr_to_pfn(APIC_DEFAULT_PHYS_BASE), mfn,
PAGE_ORDER_4K, p2m_get_hostp2m(d)->default_access);
static void vmx_free_vlapic_mapping(struct domain *d)
{
- unsigned long mfn = d->arch.hvm_domain.vmx.apic_access_mfn;
+ unsigned long mfn = d->arch.hvm.vmx.apic_access_mfn;
if ( mfn != 0 )
free_shared_domheap_page(mfn_to_page(_mfn(mfn)));
{
paddr_t virt_page_ma, apic_page_ma;
- if ( v->domain->arch.hvm_domain.vmx.apic_access_mfn == 0 )
+ if ( v->domain->arch.hvm.vmx.apic_access_mfn == 0 )
return;
ASSERT(cpu_has_vmx_virtualize_apic_accesses);
virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
- apic_page_ma = v->domain->arch.hvm_domain.vmx.apic_access_mfn;
+ apic_page_ma = v->domain->arch.hvm.vmx.apic_access_mfn;
apic_page_ma <<= PAGE_SHIFT;
vmx_vmcs_enter(v);
if ( nestedhvm_vcpu_in_guestmode(curr) && vcpu_nestedhvm(curr).stale_np2m )
return false;
- if ( curr->domain->arch.hvm_domain.pi_ops.vcpu_block )
+ if ( curr->domain->arch.hvm.pi_ops.vcpu_block )
vmx_pi_do_resume(curr);
if ( !cpu_has_vmx_vpid )
#include <asm/hvm/support.h>
#define vpic_domain(v) (container_of((v), struct domain, \
- arch.hvm_domain.vpic[!vpic->is_master]))
+ arch.hvm.vpic[!vpic->is_master]))
#define __vpic_lock(v) &container_of((v), struct hvm_domain, \
vpic[!(v)->is_master])->irq_lock
#define vpic_lock(v) spin_lock(__vpic_lock(v))
if ( vpic->is_master )
{
/* Master INT line is connected in Virtual Wire Mode. */
- struct vcpu *v = vpic_domain(vpic)->arch.hvm_domain.i8259_target;
+ struct vcpu *v = vpic_domain(vpic)->arch.hvm.i8259_target;
+
if ( v != NULL )
{
TRACE_1D(TRC_HVM_EMUL_PIC_KICK, irq);
return X86EMUL_OKAY;
}
- vpic = &current->domain->arch.hvm_domain.vpic[port >> 7];
+ vpic = &current->domain->arch.hvm.vpic[port >> 7];
if ( dir == IOREQ_WRITE )
vpic_ioport_write(vpic, port, (uint8_t)*val);
BUG_ON(bytes != 1);
- vpic = &current->domain->arch.hvm_domain.vpic[port & 1];
+ vpic = &current->domain->arch.hvm.vpic[port & 1];
if ( dir == IOREQ_WRITE )
{
/* Save the state of both PICs */
for ( i = 0; i < 2 ; i++ )
{
- s = &d->arch.hvm_domain.vpic[i];
+ s = &d->arch.hvm.vpic[i];
if ( hvm_save_entry(PIC, i, h, s) )
return 1;
}
/* Which PIC is this? */
if ( inst > 1 )
return -EINVAL;
- s = &d->arch.hvm_domain.vpic[inst];
+ s = &d->arch.hvm.vpic[inst];
/* Load the state */
if ( hvm_load_entry(PIC, h, s) != 0 )
return;
/* Master PIC. */
- vpic = &d->arch.hvm_domain.vpic[0];
+ vpic = &d->arch.hvm.vpic[0];
memset(vpic, 0, sizeof(*vpic));
vpic->is_master = 1;
vpic->elcr = 1 << 2;
void vpic_irq_positive_edge(struct domain *d, int irq)
{
- struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
+ struct hvm_hw_vpic *vpic = &d->arch.hvm.vpic[irq >> 3];
uint8_t mask = 1 << (irq & 7);
ASSERT(has_vpic(d));
void vpic_irq_negative_edge(struct domain *d, int irq)
{
- struct hvm_hw_vpic *vpic = &d->arch.hvm_domain.vpic[irq >> 3];
+ struct hvm_hw_vpic *vpic = &d->arch.hvm.vpic[irq >> 3];
uint8_t mask = 1 << (irq & 7);
ASSERT(has_vpic(d));
int vpic_ack_pending_irq(struct vcpu *v)
{
int irq, vector;
- struct hvm_hw_vpic *vpic = &v->domain->arch.hvm_domain.vpic[0];
+ struct hvm_hw_vpic *vpic = &v->domain->arch.hvm.vpic[0];
ASSERT(has_vpic(v->domain));
#include <asm/mc146818rtc.h>
#define mode_is(d, name) \
- ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
+ ((d)->arch.hvm.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
void hvm_init_guest_time(struct domain *d)
{
- struct pl_time *pl = d->arch.hvm_domain.pl_time;
+ struct pl_time *pl = d->arch.hvm.pl_time;
spin_lock_init(&pl->pl_time_lock);
pl->stime_offset = -(u64)get_s_time();
uint64_t hvm_get_guest_time_fixed(const struct vcpu *v, uint64_t at_tsc)
{
- struct pl_time *pl = v->domain->arch.hvm_domain.pl_time;
+ struct pl_time *pl = v->domain->arch.hvm.pl_time;
u64 now;
/* Called from device models shared with PV guests. Be careful. */
gsi = hvm_isa_irq_to_gsi(isa_irq);
if ( src == hvm_intsrc_pic )
- return (v->domain->arch.hvm_domain.vpic[isa_irq >> 3].irq_base
+ return (v->domain->arch.hvm.vpic[isa_irq >> 3].irq_base
+ (isa_irq & 7));
ASSERT(src == hvm_intsrc_lapic);
case PTSRC_isa:
{
- uint8_t pic_imr = v->domain->arch.hvm_domain.vpic[pt->irq >> 3].imr;
+ uint8_t pic_imr = v->domain->arch.hvm.vpic[pt->irq >> 3].imr;
/* Check if the interrupt is unmasked in the PIC. */
if ( !(pic_imr & (1 << (pt->irq & 7))) && vlapic_accept_pic_intr(v) )
case PTSRC_isa:
hvm_isa_irq_deassert(v->domain, irq);
if ( platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
- v->domain->arch.hvm_domain.vpic[irq >> 3].int_output )
+ v->domain->arch.hvm.vpic[irq >> 3].int_output )
hvm_isa_irq_assert(v->domain, irq, NULL);
else
{
if ( !pt->one_shot )
{
- if ( v->domain->arch.hvm_domain.params[HVM_PARAM_VPT_ALIGN] )
+ if ( v->domain->arch.hvm.params[HVM_PARAM_VPT_ALIGN] )
{
pt->scheduled = align_timer(pt->scheduled, pt->period);
}
pt_adjust_vcpu(&vpit->pt0, v);
spin_unlock(&vpit->lock);
- pl_time = v->domain->arch.hvm_domain.pl_time;
+ pl_time = v->domain->arch.hvm.pl_time;
spin_lock(&pl_time->vrtc.lock);
pt_adjust_vcpu(&pl_time->vrtc.pt, v);
if ( d )
{
pt_resume(&d->arch.vpit.pt0);
- pt_resume(&d->arch.hvm_domain.pl_time->vrtc.pt);
+ pt_resume(&d->arch.hvm.pl_time->vrtc.pt);
for ( i = 0; i < HPET_TIMER_NUM; i++ )
- pt_resume(&d->arch.hvm_domain.pl_time->vhpet.pt[i]);
+ pt_resume(&d->arch.hvm.pl_time->vhpet.pt[i]);
}
if ( vlapic_pt )
radix_tree_init(&d->arch.irq_pirq);
if ( is_hvm_domain(d) )
- radix_tree_init(&d->arch.hvm_domain.emuirq_pirq);
+ radix_tree_init(&d->arch.hvm.emuirq_pirq);
for ( i = 1; platform_legacy_irq(i); ++i )
{
{
radix_tree_destroy(&d->arch.irq_pirq, NULL);
if ( is_hvm_domain(d) )
- radix_tree_destroy(&d->arch.hvm_domain.emuirq_pirq, NULL);
+ radix_tree_destroy(&d->arch.hvm.emuirq_pirq, NULL);
}
struct pirq *alloc_pirq_struct(struct domain *d)
/* do not store emuirq mappings for pt devices */
if ( emuirq != IRQ_PT )
{
- int err = radix_tree_insert(&d->arch.hvm_domain.emuirq_pirq, emuirq,
+ int err = radix_tree_insert(&d->arch.hvm.emuirq_pirq, emuirq,
radix_tree_int_to_ptr(pirq));
switch ( err )
case -EEXIST:
radix_tree_replace_slot(
radix_tree_lookup_slot(
- &d->arch.hvm_domain.emuirq_pirq, emuirq),
+ &d->arch.hvm.emuirq_pirq, emuirq),
radix_tree_int_to_ptr(pirq));
break;
default:
pirq_cleanup_check(info, d);
}
if ( emuirq != IRQ_PT )
- radix_tree_delete(&d->arch.hvm_domain.emuirq_pirq, emuirq);
+ radix_tree_delete(&d->arch.hvm.emuirq_pirq, emuirq);
done:
return ret;
paging_lock(d);
- dirty_vram = d->arch.hvm_domain.dirty_vram;
+ dirty_vram = d->arch.hvm.dirty_vram;
if ( !dirty_vram )
{
rc = -ENOMEM;
goto out;
}
- d->arch.hvm_domain.dirty_vram = dirty_vram;
+ d->arch.hvm.dirty_vram = dirty_vram;
}
if ( begin_pfn != dirty_vram->begin_pfn ||
{
paging_lock(d);
- dirty_vram = d->arch.hvm_domain.dirty_vram;
+ dirty_vram = d->arch.hvm.dirty_vram;
if ( dirty_vram )
{
/*
begin_pfn = dirty_vram->begin_pfn;
nr = dirty_vram->end_pfn - dirty_vram->begin_pfn;
xfree(dirty_vram);
- d->arch.hvm_domain.dirty_vram = NULL;
+ d->arch.hvm.dirty_vram = NULL;
}
paging_unlock(d);
d->arch.paging.mode &= ~PG_log_dirty;
- xfree(d->arch.hvm_domain.dirty_vram);
- d->arch.hvm_domain.dirty_vram = NULL;
+ XFREE(d->arch.hvm.dirty_vram);
out:
paging_unlock(d);
}
#define mem_sharing_enabled(d) \
- (is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
+ (is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled)
static atomic_t nr_saved_mfns = ATOMIC_INIT(0);
static atomic_t nr_shared_mfns = ATOMIC_INIT(0);
/* Only HAP is supported */
rc = -ENODEV;
- if ( !hap_enabled(d) || !d->arch.hvm_domain.mem_sharing_enabled )
+ if ( !hap_enabled(d) || !d->arch.hvm.mem_sharing_enabled )
goto out;
switch ( mso.op )
if ( unlikely(need_iommu(d) && mec->u.enable) )
rc = -EXDEV;
else
- d->arch.hvm_domain.mem_sharing_enabled = mec->u.enable;
+ d->arch.hvm.mem_sharing_enabled = mec->u.enable;
}
break;
* calls now that we've torn down the bitmap */
d->arch.paging.mode &= ~PG_log_dirty;
- if (d->arch.hvm_domain.dirty_vram) {
- xfree(d->arch.hvm_domain.dirty_vram->sl1ma);
- xfree(d->arch.hvm_domain.dirty_vram->dirty_bitmap);
- xfree(d->arch.hvm_domain.dirty_vram);
- d->arch.hvm_domain.dirty_vram = NULL;
+ if ( d->arch.hvm.dirty_vram )
+ {
+ xfree(d->arch.hvm.dirty_vram->sl1ma);
+ xfree(d->arch.hvm.dirty_vram->dirty_bitmap);
+ XFREE(d->arch.hvm.dirty_vram);
}
out:
p2m_lock(p2m_get_hostp2m(d));
paging_lock(d);
- dirty_vram = d->arch.hvm_domain.dirty_vram;
+ dirty_vram = d->arch.hvm.dirty_vram;
if ( dirty_vram && (!nr ||
( begin_pfn != dirty_vram->begin_pfn
xfree(dirty_vram->sl1ma);
xfree(dirty_vram->dirty_bitmap);
xfree(dirty_vram);
- dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
+ dirty_vram = d->arch.hvm.dirty_vram = NULL;
}
if ( !nr )
goto out;
dirty_vram->begin_pfn = begin_pfn;
dirty_vram->end_pfn = end_pfn;
- d->arch.hvm_domain.dirty_vram = dirty_vram;
+ d->arch.hvm.dirty_vram = dirty_vram;
if ( (dirty_vram->sl1ma = xmalloc_array(paddr_t, nr)) == NULL )
goto out_dirty_vram;
xfree(dirty_vram->sl1ma);
out_dirty_vram:
xfree(dirty_vram);
- dirty_vram = d->arch.hvm_domain.dirty_vram = NULL;
+ dirty_vram = d->arch.hvm.dirty_vram = NULL;
out:
paging_unlock(d);
guest_l1e_t guest_entry = { guest_intpte };
shadow_l1e_t *sp = shadow_entry_ptr;
struct domain *d = v->domain;
- struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+ struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
u32 pass_thru_flags;
u32 gflags, sflags;
if ( !mmio_mfn &&
(type = hvm_get_mem_pinned_cacheattr(d, target_gfn, 0)) >= 0 )
sflags |= pat_type_2_pte_flags(type);
- else if ( d->arch.hvm_domain.is_in_uc_mode )
+ else if ( d->arch.hvm.is_in_uc_mode )
sflags |= pat_type_2_pte_flags(PAT_TYPE_UNCACHABLE);
else
if ( iomem_access_permitted(d, mfn_x(target_mfn), mfn_x(target_mfn)) )
mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
int flags = shadow_l1e_get_flags(new_sl1e);
unsigned long gfn;
- struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+ struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
if ( !dirty_vram /* tracking disabled? */
|| !(flags & _PAGE_RW) /* read-only mapping? */
mfn_t mfn = shadow_l1e_get_mfn(old_sl1e);
int flags = shadow_l1e_get_flags(old_sl1e);
unsigned long gfn;
- struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+ struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
if ( !dirty_vram /* tracking disabled? */
|| !(flags & _PAGE_RW) /* read-only mapping? */
{
/*
* Only makes sense for vector-based callback, else HVM-IRQ logic
- * calls back into itself and deadlocks on hvm_domain.irq_lock.
+ * calls back into itself and deadlocks on hvm.irq_lock.
*/
if ( !is_hvm_pv_evtchn_domain(d) )
return -EINVAL;
ASSERT(e <= INT_MAX);
for ( i = s; i <= e; i++ )
- __clear_bit(i, d->arch.hvm_domain.io_bitmap);
+ __clear_bit(i, d->arch.hvm.io_bitmap);
return 0;
}
if ( is_hvm_domain(d) )
{
- bitmap_fill(d->arch.hvm_domain.io_bitmap, 0x10000);
+ bitmap_fill(d->arch.hvm.io_bitmap, 0x10000);
rc = rangeset_report_ranges(d->arch.ioport_caps, 0, 0x10000,
io_bitmap_cb, d);
BUG_ON(rc);
* Access to 1 byte RTC ports also needs to be trapped in order
* to keep consistency with PV.
*/
- __set_bit(0xcf8, d->arch.hvm_domain.io_bitmap);
- __set_bit(RTC_PORT(0), d->arch.hvm_domain.io_bitmap);
- __set_bit(RTC_PORT(1), d->arch.hvm_domain.io_bitmap);
+ __set_bit(0xcf8, d->arch.hvm.io_bitmap);
+ __set_bit(RTC_PORT(0), d->arch.hvm.io_bitmap);
+ __set_bit(RTC_PORT(1), d->arch.hvm.io_bitmap);
}
}
if ( is_hvm_domain(d) )
{
- struct pl_time *pl = v->domain->arch.hvm_domain.pl_time;
+ struct pl_time *pl = v->domain->arch.hvm.pl_time;
stime += pl->stime_offset + v->arch.hvm_vcpu.stime_offset;
if ( stime >= 0 )
if ( is_hvm_domain(d) )
{
if ( hvm_tsc_scaling_supported && !d->arch.vtsc )
- d->arch.hvm_domain.tsc_scaling_ratio =
+ d->arch.hvm.tsc_scaling_ratio =
hvm_get_tsc_scaling_ratio(d->arch.tsc_khz);
hvm_set_rdtsc_exiting(d, d->arch.vtsc);
* call set_tsc_offset() later from hvm_vcpu_reset_state() and they
* will sync their TSC to BSP's sync_tsc.
*/
- d->arch.hvm_domain.sync_tsc = rdtsc();
+ d->arch.hvm.sync_tsc = rdtsc();
hvm_set_tsc_offset(d->vcpu[0],
d->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset,
- d->arch.hvm_domain.sync_tsc);
+ d->arch.hvm.sync_tsc);
}
}
xen_event_channel_notification_t notification_fn)
{
int rc;
- unsigned long ring_gfn = d->arch.hvm_domain.params[param];
+ unsigned long ring_gfn = d->arch.hvm.params[param];
if ( !*ved )
*ved = xzalloc(struct vm_event_domain);
/* Prevent device assign if mem paging or mem sharing have been
* enabled for this domain */
if ( unlikely(!need_iommu(d) &&
- (d->arch.hvm_domain.mem_sharing_enabled ||
+ (d->arch.hvm.mem_sharing_enabled ||
vm_event_check_ring(d->vm_event_paging) ||
p2m_get_hostp2m(d)->global_logdirty)) )
return -EXDEV;
{
struct vpci_msix *msix;
- list_for_each_entry ( msix, &d->arch.hvm_domain.msix_tables, next )
+ list_for_each_entry ( msix, &d->arch.hvm.msix_tables, next )
{
const struct vpci_bar *bars = msix->pdev->vpci->header.bars;
unsigned int i;
if ( rc )
return rc;
- if ( list_empty(&d->arch.hvm_domain.msix_tables) )
+ if ( list_empty(&d->arch.hvm.msix_tables) )
register_mmio_handler(d, &vpci_msix_table_ops);
- list_add(&pdev->vpci->msix->next, &d->arch.hvm_domain.msix_tables);
+ list_add(&pdev->vpci->msix->next, &d->arch.hvm.msix_tables);
return 0;
}
/* Virtual MMU */
struct p2m_domain p2m;
- struct hvm_domain hvm_domain;
+ struct hvm_domain hvm;
struct vmmio vmmio;
#define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain))
#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
- (d)->arch.hvm_domain.irq->callback_via_type == HVMIRQ_callback_vector)
+ (d)->arch.hvm.irq->callback_via_type == HVMIRQ_callback_vector)
#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
#define is_domain_direct_mapped(d) ((void)(d), 0)
union {
struct pv_domain pv;
- struct hvm_domain hvm_domain;
+ struct hvm_domain hvm;
};
struct paging_domain paging;
};
};
-#define hap_enabled(d) ((d)->arch.hvm_domain.hap_enabled)
+#define hap_enabled(d) ((d)->arch.hvm.hap_enabled)
#endif /* __ASM_X86_HVM_DOMAIN_H__ */
(1ULL << hvm_funcs.tsc_scaling.ratio_frac_bits)
#define hvm_tsc_scaling_ratio(d) \
- ((d)->arch.hvm_domain.tsc_scaling_ratio)
+ ((d)->arch.hvm.tsc_scaling_ratio)
u64 hvm_scale_tsc(const struct domain *d, u64 tsc);
u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);
bool hvm_set_guest_bndcfgs(struct vcpu *v, u64 val);
#define has_hvm_params(d) \
- ((d)->arch.hvm_domain.params != NULL)
+ ((d)->arch.hvm.params != NULL)
#define viridian_feature_mask(d) \
- (has_hvm_params(d) ? (d)->arch.hvm_domain.params[HVM_PARAM_VIRIDIAN] : 0)
+ (has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)
#define is_viridian_domain(d) \
(is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
#define arch_vcpu_block(v) ({ \
struct vcpu *v_ = (v); \
struct domain *d_ = v_->domain; \
- if ( is_hvm_domain(d_) && \
- (d_->arch.hvm_domain.pi_ops.vcpu_block) ) \
- d_->arch.hvm_domain.pi_ops.vcpu_block(v_); \
+ if ( is_hvm_domain(d_) && d_->arch.hvm.pi_ops.vcpu_block ) \
+ d_->arch.hvm.pi_ops.vcpu_block(v_); \
})
#endif /* __ASM_X86_HVM_HVM_H__ */
(((((dev)<<2) + ((dev)>>3) + (intx)) & 31) + 16)
#define hvm_pci_intx_link(dev, intx) \
(((dev) + (intx)) & 3)
-#define hvm_domain_irq(d) ((d)->arch.hvm_domain.irq)
+#define hvm_domain_irq(d) ((d)->arch.hvm.irq)
#define hvm_irq_size(cnt) offsetof(struct hvm_irq, gsi_assert_count[cnt])
#define hvm_isa_irq_to_gsi(isa_irq) ((isa_irq) ? : 2)
/* Nested HVM on/off per domain */
static inline bool nestedhvm_enabled(const struct domain *d)
{
- return is_hvm_domain(d) && d->arch.hvm_domain.params &&
- d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM];
+ return is_hvm_domain(d) && d->arch.hvm.params &&
+ d->arch.hvm.params[HVM_PARAM_NESTEDHVM];
}
/* Nested VCPU */
};
#define hvm_vioapic_size(cnt) offsetof(struct hvm_vioapic, redirtbl[cnt])
-#define domain_vioapic(d, i) ((d)->arch.hvm_domain.vioapic[i])
+#define domain_vioapic(d, i) ((d)->arch.hvm.vioapic[i])
#define vioapic_domain(v) ((v)->domain)
int vioapic_init(struct domain *d);
void pt_adjust_global_vcpu_target(struct vcpu *v);
#define pt_global_vcpu_target(d) \
- (is_hvm_domain(d) && (d)->arch.hvm_domain.i8259_target ? \
- (d)->arch.hvm_domain.i8259_target : \
+ (is_hvm_domain(d) && (d)->arch.hvm.i8259_target ? \
+ (d)->arch.hvm.i8259_target : \
(d)->vcpu ? (d)->vcpu[0] : NULL)
void pt_may_unmask_irq(struct domain *d, struct periodic_time *vlapic_pt);
#define domain_pirq_to_emuirq(d, pirq) pirq_field(d, pirq, \
arch.hvm.emuirq, IRQ_UNBOUND)
#define domain_emuirq_to_pirq(d, emuirq) ({ \
- void *__ret = radix_tree_lookup(&(d)->arch.hvm_domain.emuirq_pirq, \
- emuirq); \
+ void *__ret = radix_tree_lookup(&(d)->arch.hvm.emuirq_pirq, emuirq);\
__ret ? radix_tree_ptr_to_int(__ret) : IRQ_UNBOUND; \
})
#define IRQ_UNBOUND -1