The X86 domain structure already occupied PAGE_SIZE (4096).
Looking at the memory layout of the structure, we could see that
overall most was occupied by (used the pahole tool on domain.o):
* sizeof(domain.arch) = sizeof(arch_domain) = 3328 bytes.
* sizeof(domain.arch.hvm_domain) = 2224 bytes.
* sizeof(domain.arch.hvm_domain.pl_time) = 1088 bytes.
This patch attempts to free some space, by making the pl_time
field in hvm_domain dynamically allocated.
We xzalloc/xfree it at hvm_domain_initialise/hvm_domain_destroy.
After this change, the domain structure shrank by 1152 bytes (>1K!).
Signed-off-by: Corneliu ZUZU <czuzu@bitdefender.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
#include <xen/event.h>
#include <xen/trace.h>
-#define domain_vhpet(x) (&(x)->arch.hvm_domain.pl_time.vhpet)
+#define domain_vhpet(x) (&(x)->arch.hvm_domain.pl_time->vhpet)
#define vcpu_vhpet(x) (domain_vhpet((x)->domain))
-#define vhpet_domain(x) (container_of((x), struct domain, \
- arch.hvm_domain.pl_time.vhpet))
+#define vhpet_domain(x) (container_of(x, struct pl_time, vhpet)->domain)
#define vhpet_vcpu(x) (pt_global_vcpu_target(vhpet_domain(x)))
#define HPET_BASE_ADDRESS 0xfed00000ULL
if ( rc != 0 )
goto fail0;
+ d->arch.hvm_domain.pl_time = xzalloc(struct pl_time);
d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
d->arch.hvm_domain.io_handler = xzalloc_array(struct hvm_io_handler,
NR_IO_HANDLERS);
rc = -ENOMEM;
- if ( !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
+ if ( !d->arch.hvm_domain.pl_time ||
+ !d->arch.hvm_domain.params || !d->arch.hvm_domain.io_handler )
goto fail1;
+ /* need link to containing domain */
+ d->arch.hvm_domain.pl_time->domain = d;
+
/* Set the default IO Bitmap. */
if ( is_hardware_domain(d) )
{
xfree(d->arch.hvm_domain.io_bitmap);
xfree(d->arch.hvm_domain.io_handler);
xfree(d->arch.hvm_domain.params);
+ xfree(d->arch.hvm_domain.pl_time);
fail0:
hvm_destroy_cacheattr_region_list(d);
return rc;
{
xfree(d->arch.hvm_domain.io_handler);
xfree(d->arch.hvm_domain.params);
+ xfree(d->arch.hvm_domain.pl_time);
hvm_destroy_cacheattr_region_list(d);
void hvm_acpi_power_button(struct domain *d)
{
- PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
+ PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
if ( !has_vpm(d) )
return;
void hvm_acpi_sleep_button(struct domain *d)
{
- PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
+ PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
if ( !has_vpm(d) )
return;
int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
struct vcpu *v = current;
- PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
+ PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
uint32_t addr, data, byte;
int i;
int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
struct vcpu *v = current;
- PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
+ PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
if ( bytes != 4 )
{
static int pmtimer_save(struct domain *d, hvm_domain_context_t *h)
{
- PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
+ PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
uint32_t x, msb = s->pm.tmr_val & TMR_VAL_MSB;
int rc;
static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
{
- PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
+ PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
if ( !has_vpm(d) )
return -ENODEV;
void pmtimer_init(struct vcpu *v)
{
- PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
+ PMTState *s = &v->domain->arch.hvm_domain.pl_time->vpmt;
if ( !has_vpm(v->domain) )
return;
void pmtimer_deinit(struct domain *d)
{
- PMTState *s = &d->arch.hvm_domain.pl_time.vpmt;
+ PMTState *s = &d->arch.hvm_domain.pl_time->vpmt;
if ( !has_vpm(d) )
return;
return;
/* Reset the counter. */
- d->arch.hvm_domain.pl_time.vpmt.pm.tmr_val = 0;
+ d->arch.hvm_domain.pl_time->vpmt.pm.tmr_val = 0;
}
#define MIN_PER_HOUR 60
#define HOUR_PER_DAY 24
-#define domain_vrtc(x) (&(x)->arch.hvm_domain.pl_time.vrtc)
+#define domain_vrtc(x) (&(x)->arch.hvm_domain.pl_time->vrtc)
#define vcpu_vrtc(x) (domain_vrtc((x)->domain))
-#define vrtc_domain(x) (container_of((x), struct domain, \
- arch.hvm_domain.pl_time.vrtc))
+#define vrtc_domain(x) (container_of(x, struct pl_time, vrtc)->domain)
#define vrtc_vcpu(x) (pt_global_vcpu_target(vrtc_domain(x)))
#define epoch_year 1900
#define get_year(x) (x + epoch_year)
void hvm_init_guest_time(struct domain *d)
{
- struct pl_time *pl = &d->arch.hvm_domain.pl_time;
+ struct pl_time *pl = d->arch.hvm_domain.pl_time;
spin_lock_init(&pl->pl_time_lock);
pl->stime_offset = -(u64)get_s_time();
u64 hvm_get_guest_time_fixed(struct vcpu *v, u64 at_tsc)
{
- struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
+ struct pl_time *pl = v->domain->arch.hvm_domain.pl_time;
u64 now;
/* Called from device models shared with PV guests. Be careful. */
pt_adjust_vcpu(&vpit->pt0, v);
spin_unlock(&vpit->lock);
- pl_time = &v->domain->arch.hvm_domain.pl_time;
+ pl_time = v->domain->arch.hvm_domain.pl_time;
spin_lock(&pl_time->vrtc.lock);
pt_adjust_vcpu(&pl_time->vrtc.pt, v);
if ( d )
{
pt_resume(&d->arch.vpit.pt0);
- pt_resume(&d->arch.hvm_domain.pl_time.vrtc.pt);
+ pt_resume(&d->arch.hvm_domain.pl_time->vrtc.pt);
for ( i = 0; i < HPET_TIMER_NUM; i++ )
- pt_resume(&d->arch.hvm_domain.pl_time.vhpet.pt[i]);
+ pt_resume(&d->arch.hvm_domain.pl_time->vhpet.pt[i]);
}
if ( vlapic_pt )
if ( is_hvm_domain(d) )
{
- struct pl_time *pl = &v->domain->arch.hvm_domain.pl_time;
+ struct pl_time *pl = v->domain->arch.hvm_domain.pl_time;
stime += pl->stime_offset + v->arch.hvm_vcpu.stime_offset;
if ( stime >= 0 )
/* Cached CF8 for guest PCI config cycles */
uint32_t pci_cf8;
- struct pl_time pl_time;
+ struct pl_time *pl_time;
struct hvm_io_handler *io_handler;
unsigned int io_handler_count;
/* Ensures monotonicity in appropriate timer modes. */
uint64_t last_guest_time;
spinlock_t pl_time_lock;
+ struct domain *domain;
};
void pt_save_timer(struct vcpu *v);