}
else
{
- if ( !d->is_pinned && !dom0_affinity_relaxed )
+ if ( !opt_dom0_vcpus_pin && !dom0_affinity_relaxed )
sched_set_affinity(v, &dom0_cpus, NULL);
sched_set_affinity(v, NULL, &dom0_cpus);
}
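An illustration, not part of the patch: the hunk above is the dom0 vCPU setup path (most likely dom0_setup_vcpu()), which now tests the opt_dom0_vcpus_pin command-line option directly instead of reading the per-domain is_pinned flag this patch removes. Below is a minimal standalone model of the resulting logic, assuming sched_set_affinity(v, hard, soft) leaves an affinity untouched when the corresponding argument is NULL, with cpumasks reduced to plain bitmasks:

/* Standalone sketch, not Xen source. Compile with: cc affinity.c */
#include <stdbool.h>
#include <stdio.h>

struct vcpu {
    unsigned long hard;   /* stands in for cpu_hard_affinity */
    unsigned long soft;   /* stands in for cpu_soft_affinity */
};

/* Assumed semantics: NULL leaves the corresponding affinity unchanged. */
static void sched_set_affinity(struct vcpu *v, const unsigned long *hard,
                               const unsigned long *soft)
{
    if ( hard )
        v->hard = *hard;
    if ( soft )
        v->soft = *soft;
}

int main(void)
{
    const bool opt_dom0_vcpus_pin = false, dom0_affinity_relaxed = false;
    const unsigned long dom0_cpus = 0x0fUL;   /* say, CPUs 0-3 */
    struct vcpu v = { .hard = ~0UL, .soft = ~0UL };

    /* Mirrors the post-patch logic of the hunk above. */
    if ( !opt_dom0_vcpus_pin && !dom0_affinity_relaxed )
        sched_set_affinity(&v, &dom0_cpus, NULL);
    sched_set_affinity(&v, NULL, &dom0_cpus);

    printf("hard=%#lx soft=%#lx\n", v.hard, v.soft);
    return 0;
}

With neither option set, both affinities end up confined to dom0_cpus; with either one set, only the soft affinity is, and the actual 1:1 pinning for dom0_vcpus_pin is established at vCPU creation in the scheduler hunk further down.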
struct vcpu_get_physid cpu_id;
rc = -EINVAL;
- if ( !is_pinned_vcpu(v) )
+ if ( !is_hwdom_pinned_vcpu(v) )
break;
cpu_id.phys_id =

if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
break;
- if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+ if ( !is_hwdom_pinned_vcpu(curr) )
return X86EMUL_OKAY;
if ( (rdmsr_safe(MSR_AMD64_NB_CFG, temp) != 0) ||
((val ^ temp) & ~(1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )

if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD ||
boot_cpu_data.x86 < 0x10 || boot_cpu_data.x86 > 0x17 )
break;
- if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+ if ( !is_hwdom_pinned_vcpu(curr) )
return X86EMUL_OKAY;
if ( rdmsr_safe(MSR_FAM10H_MMIO_CONF_BASE, temp) != 0 )
break;

case MSR_IA32_UCODE_REV:
if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
break;
- if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) )
+ if ( !is_hwdom_pinned_vcpu(curr) )
return X86EMUL_OKAY;
if ( rdmsr_safe(reg, temp) )
break;

case MSR_IA32_ENERGY_PERF_BIAS:
if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
break;
- if ( !is_hardware_domain(currd) || !is_pinned_vcpu(curr) ||
- wrmsr_safe(reg, val) == 0 )
+ if ( !is_hwdom_pinned_vcpu(curr) || wrmsr_safe(reg, val) == 0 )
return X86EMUL_OKAY;
break;
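Again as illustration only: all four MSR cases above used the same two-clause guard, !is_hardware_domain(currd) || !is_pinned_vcpu(curr); since the new predicate folds the hardware-domain test into itself, each collapses to a single !is_hwdom_pinned_vcpu(curr). Independent of that cleanup, the MSR_AMD64_NB_CFG write path tolerates a value only if it differs from the current one in nothing but the CF8 extension enable bit. A self-contained sketch of that filter, assuming bit position 46 as in Xen's AMD64_NB_CFG_CF8_EXT_ENABLE_BIT:

/* Standalone sketch, not Xen source. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AMD64_NB_CFG_CF8_EXT_ENABLE_BIT 46   /* assumed, per Xen's msr-index.h */

/* Accept a write only if every bit other than the CF8 extension
 * enable bit keeps its current value. */
static bool nb_cfg_write_acceptable(uint64_t val, uint64_t cur)
{
    return ((val ^ cur) & ~(1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) == 0;
}

int main(void)
{
    const uint64_t cur = 0;

    /* Toggling only the CF8 extension bit passes the filter... */
    printf("%d\n", nb_cfg_write_acceptable(
               cur ^ (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT), cur));
    /* ...while changing any other bit does not. */
    printf("%d\n", nb_cfg_write_acceptable(cur | 1, cur));
    return 0;
}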
if ( hardware_domid < 0 || hardware_domid >= DOMID_FIRST_RESERVED )
panic("The value of hardware_dom must be a valid domain ID\n");
- d->is_pinned = opt_dom0_vcpus_pin;
d->disable_migrate = 1;
old_hwdom = hardware_domain;
hardware_domain = d;

/*
 * Initialize affinity settings. The idler, and potentially
* domain-0 VCPUs, are pinned onto their respective physical CPUs.
*/
- if ( is_idle_domain(d) || d->is_pinned )
+ if ( is_idle_domain(d) || (is_hardware_domain(d) && opt_dom0_vcpus_pin) )
sched_set_affinity(v, cpumask_of(processor), &cpumask_all);
else
sched_set_affinity(v, &cpumask_all, &cpumask_all);
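For illustration: the hunk above is what makes dropping the flag safe. With dom0_vcpus_pin in effect, every hardware-domain vCPU is created with cpumask_of(processor) as its hard affinity, a mask of weight 1, which is exactly what the new predicate tests for. A trivial standalone check of that invariant, modelling cpumask_of()/cpumask_weight() with a bitmask and a population count:

/* Standalone sketch, not Xen source. */
#include <assert.h>

static unsigned long cpumask_of(unsigned int cpu)
{
    return 1UL << cpu;   /* one bit per physical CPU */
}

static unsigned int cpumask_weight(unsigned long mask)
{
    return (unsigned int)__builtin_popcountl(mask);
}

int main(void)
{
    unsigned int processor;

    /* A vCPU pinned at creation always has a weight-1 hard mask,
     * so the weight test subsumes the removed flag. */
    for ( processor = 0; processor < 8; processor++ )
        assert(cpumask_weight(cpumask_of(processor)) == 1);
    return 0;
}

A consequence, visible in the affinity-setting hunk just below (vcpu_set_hard_affinity(), by the look of it): widening such a vCPU's hard affinity is no longer rejected with -EINVAL; the vCPU simply stops counting as pinned.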
cpumask_t online_affinity;
cpumask_t *online;
- if ( v->domain->is_pinned )
- return -EINVAL;
-
online = VCPU2ONLINE(v);
cpumask_and(&online_affinity, affinity, online);
if ( cpumask_empty(&online_affinity) )

bool is_console;
/* Is this a xenstore domain (not dom0)? */
bool is_xenstore;
- /* Domain's VCPUs are pinned 1:1 to physical CPUs? */
- bool is_pinned;
/* Non-migratable and non-restoreable? */
bool disable_migrate;
/* Is this guest being debugged by dom0? */

return is_hvm_domain(v->domain);
}
-#define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
- cpumask_weight((v)->cpu_hard_affinity) == 1)
+static inline bool is_hwdom_pinned_vcpu(const struct vcpu *v)
+{
+ return (is_hardware_domain(v->domain) &&
+ cpumask_weight(v->cpu_hard_affinity) == 1);
+}
+
#ifdef CONFIG_HAS_PASSTHROUGH
#define has_iommu_pt(d) (dom_iommu(d)->status != IOMMU_STATUS_disabled)
#define need_iommu_pt_sync(d) (dom_iommu(d)->need_sync)
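
As a final illustration, not part of the patch: taken together, the sched.h hunks replace a flag-or-weight test that applied to any domain with a hardware-domain-only weight test. A self-contained model contrasting the two predicates, with domain and vCPU reduced to just the fields the check uses:

/* Standalone sketch, not Xen source. */
#include <stdbool.h>
#include <stdio.h>

struct domain {
    bool is_hardware_domain;   /* models is_hardware_domain(d) */
    bool is_pinned;            /* the flag this patch removes */
};

struct vcpu {
    struct domain *domain;
    unsigned long cpu_hard_affinity;   /* one bit per physical CPU */
};

static unsigned int cpumask_weight(unsigned long mask)
{
    return (unsigned int)__builtin_popcountl(mask);
}

/* Old: any domain counted as pinned if its flag was set or the
 * vCPU could run on exactly one CPU. */
static bool is_pinned_vcpu(const struct vcpu *v)
{
    return v->domain->is_pinned ||
           cpumask_weight(v->cpu_hard_affinity) == 1;
}

/* New: only hardware-domain vCPUs with a single-CPU hard affinity. */
static bool is_hwdom_pinned_vcpu(const struct vcpu *v)
{
    return v->domain->is_hardware_domain &&
           cpumask_weight(v->cpu_hard_affinity) == 1;
}

int main(void)
{
    struct domain domu = { .is_hardware_domain = false, .is_pinned = false };
    struct vcpu v = { .domain = &domu, .cpu_hard_affinity = 1UL << 3 };

    /* A domU vCPU pinned to one CPU: pinned under the old check,
     * not under the new one. */
    printf("old=%d new=%d\n", is_pinned_vcpu(&v), is_hwdom_pinned_vcpu(&v));
    return 0;
}

The visible tightening is for non-hardware domains: a domU vCPU pinned to a single CPU used to satisfy is_pinned_vcpu(), and could therefore reach, for instance, the vcpu_get_physid path shown earlier; it no longer qualifies.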