ia64/xen-unstable
changeset 19848:5839491bbf20 tip
[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.
Don't use MAX_VCPUS; use domain::max_vcpus instead.
The changeset of 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Don't use MAX_VCPUS; use domain::max_vcpus instead.
The changeset of 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author | Isaku Yamahata <yamahata@valinux.co.jp> |
---|---|
date | Mon Jun 29 11:26:05 2009 +0900 (2009-06-29) |
parents | 772e809e58ce |
children | |
files | xen/arch/ia64/vmx/viosapic.c xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_vcpu_save.c xen/arch/ia64/xen/dom0_ops.c xen/arch/ia64/xen/dom_fw_dom0.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/hypercall.c xen/arch/ia64/xen/vhpt.c xen/include/asm-ia64/vcpumask.h |
line diff
1.1 --- a/xen/arch/ia64/vmx/viosapic.c Mon Jun 29 11:23:53 2009 +0900 1.2 +++ b/xen/arch/ia64/vmx/viosapic.c Mon Jun 29 11:26:05 2009 +0900 1.3 @@ -378,7 +378,7 @@ static int viosapic_load(struct domain * 1.4 return -EINVAL; 1.5 1.6 lowest_vcpu = NULL; 1.7 - if (viosapic_load.lowest_vcpu_id < MAX_VIRT_CPUS) 1.8 + if (viosapic_load.lowest_vcpu_id < d->max_vcpus) 1.9 lowest_vcpu = d->vcpu[viosapic_load.lowest_vcpu_id]; 1.10 else if (viosapic_load.lowest_vcpu_id != VIOSAPIC_INVALID_VCPU_ID) 1.11 return -EINVAL;
2.1 --- a/xen/arch/ia64/vmx/vlsapic.c Mon Jun 29 11:23:53 2009 +0900 2.2 +++ b/xen/arch/ia64/vmx/vlsapic.c Mon Jun 29 11:26:05 2009 +0900 2.3 @@ -153,7 +153,7 @@ static void vtm_reset(VCPU *vcpu) 2.4 2.5 if (vcpu->vcpu_id == 0) { 2.6 vtm_offset = 0UL - ia64_get_itc(); 2.7 - for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) { 2.8 + for (i = d->max_vcpus - 1; i >= 0; i--) { 2.9 if ((v = d->vcpu[i]) != NULL) { 2.10 VMX(v, vtm).vtm_offset = vtm_offset; 2.11 VMX(v, vtm).last_itc = 0; 2.12 @@ -227,7 +227,7 @@ void vtm_set_itc(VCPU *vcpu, uint64_t ne 2.13 vtm = &VMX(vcpu, vtm); 2.14 if (vcpu->vcpu_id == 0) { 2.15 vtm_offset = new_itc - ia64_get_itc(); 2.16 - for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) { 2.17 + for (i = d->max_vcpus - 1; i >= 0; i--) { 2.18 if ((v = d->vcpu[i]) != NULL) { 2.19 VMX(v, vtm).vtm_offset = vtm_offset; 2.20 VMX(v, vtm).last_itc = 0; 2.21 @@ -606,7 +606,7 @@ struct vcpu *lid_to_vcpu(struct domain * 2.22 int id = dest >> 8; 2.23 2.24 /* Fast look: assume EID=0 ID=vcpu_id. */ 2.25 - if ((dest & 0xff) == 0 && id < MAX_VIRT_CPUS) 2.26 + if ((dest & 0xff) == 0 && id < d->max_vcpus) 2.27 return d->vcpu[id]; 2.28 return NULL; 2.29 } 2.30 @@ -875,7 +875,7 @@ static int vlsapic_load(struct domain *d 2.31 int i; 2.32 2.33 vcpuid = hvm_load_instance(h); 2.34 - if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) { 2.35 + if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) { 2.36 gdprintk(XENLOG_ERR, 2.37 "%s: domain has no vlsapic %u\n", __func__, vcpuid); 2.38 return -EINVAL; 2.39 @@ -934,7 +934,7 @@ static int vtime_load(struct domain *d, 2.40 vtime_t *vtm; 2.41 2.42 vcpuid = hvm_load_instance(h); 2.43 - if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) { 2.44 + if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) { 2.45 gdprintk(XENLOG_ERR, 2.46 "%s: domain has no vtime %u\n", __func__, vcpuid); 2.47 return -EINVAL;
3.1 --- a/xen/arch/ia64/vmx/vmx_init.c Mon Jun 29 11:23:53 2009 +0900 3.2 +++ b/xen/arch/ia64/vmx/vmx_init.c Mon Jun 29 11:26:05 2009 +0900 3.3 @@ -623,7 +623,7 @@ int vmx_setup_platform(struct domain *d) 3.4 3.5 if (d->arch.is_sioemu) { 3.6 int i; 3.7 - for (i = 1; i < MAX_VIRT_CPUS; i++) 3.8 + for (i = 1; i < XEN_LEGACY_MAX_VCPUS; i++) 3.9 d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1; 3.10 } 3.11
4.1 --- a/xen/arch/ia64/vmx/vmx_vcpu_save.c Mon Jun 29 11:23:53 2009 +0900 4.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu_save.c Mon Jun 29 11:26:05 2009 +0900 4.3 @@ -228,7 +228,7 @@ static int vmx_cpu_load(struct domain *d 4.4 struct pt_regs *regs; 4.5 4.6 vcpuid = hvm_load_instance(h); 4.7 - if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) { 4.8 + if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) { 4.9 gdprintk(XENLOG_ERR, 4.10 "%s: domain has no vcpu %u\n", __func__, vcpuid); 4.11 rc = -EINVAL; 4.12 @@ -278,7 +278,7 @@ static int vmx_vpd_load(struct domain *d 4.13 int i; 4.14 4.15 vcpuid = hvm_load_instance(h); 4.16 - if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) { 4.17 + if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) { 4.18 gdprintk(XENLOG_ERR, 4.19 "%s: domain has no vcpu %u\n", __func__, vcpuid); 4.20 rc = -EINVAL;
5.1 --- a/xen/arch/ia64/xen/dom0_ops.c Mon Jun 29 11:23:53 2009 +0900 5.2 +++ b/xen/arch/ia64/xen/dom0_ops.c Mon Jun 29 11:26:05 2009 +0900 5.3 @@ -233,7 +233,8 @@ long arch_do_domctl(xen_domctl_t *op, XE 5.4 goto sendtrigger_out; 5.5 5.6 ret = -ESRCH; 5.7 - if ( (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL ) 5.8 + if ( op->u.sendtrigger.vcpu >= d->max_vcpus || 5.9 + (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL ) 5.10 goto sendtrigger_out; 5.11 5.12 ret = 0;
6.1 --- a/xen/arch/ia64/xen/dom_fw_dom0.c Mon Jun 29 11:23:53 2009 +0900 6.2 +++ b/xen/arch/ia64/xen/dom_fw_dom0.c Mon Jun 29 11:26:05 2009 +0900 6.3 @@ -60,7 +60,7 @@ acpi_update_lsapic(struct acpi_subtable_ 6.4 if (!lsapic) 6.5 return -EINVAL; 6.6 6.7 - if (lsapic_nbr < MAX_VIRT_CPUS && dom0->vcpu[lsapic_nbr] != NULL) 6.8 + if (lsapic_nbr < dom0->max_vcpus && dom0->vcpu[lsapic_nbr] != NULL) 6.9 enable = 1; 6.10 else 6.11 enable = 0;
7.1 --- a/xen/arch/ia64/xen/domain.c Mon Jun 29 11:23:53 2009 +0900 7.2 +++ b/xen/arch/ia64/xen/domain.c Mon Jun 29 11:26:05 2009 +0900 7.3 @@ -2118,6 +2118,7 @@ int __init construct_dom0(struct domain 7.4 7.5 /* Sanity! */ 7.6 BUG_ON(d != dom0); 7.7 + BUG_ON(d->vcpu == NULL); 7.8 BUG_ON(d->vcpu[0] == NULL); 7.9 BUG_ON(v->is_initialised); 7.10 7.11 @@ -2222,7 +2223,7 @@ int __init construct_dom0(struct domain 7.12 // (we should be able to deal with this... later) 7.13 7.14 /* Mask all upcalls... */ 7.15 - for ( i = 1; i < MAX_VIRT_CPUS; i++ ) 7.16 + for ( i = 1; i < XEN_LEGACY_MAX_VCPUS; i++ ) 7.17 d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1; 7.18 7.19 printk ("Dom0 max_vcpus=%d\n", dom0_max_vcpus);
8.1 --- a/xen/arch/ia64/xen/hypercall.c Mon Jun 29 11:23:53 2009 +0900 8.2 +++ b/xen/arch/ia64/xen/hypercall.c Mon Jun 29 11:26:05 2009 +0900 8.3 @@ -84,7 +84,7 @@ fw_hypercall_ipi (struct pt_regs *regs) 8.4 struct domain *d = current->domain; 8.5 8.6 /* Be sure the target exists. */ 8.7 - if (cpu > MAX_VIRT_CPUS) 8.8 + if (cpu >= d->max_vcpus) 8.9 return; 8.10 targ = d->vcpu[cpu]; 8.11 if (targ == NULL)
9.1 --- a/xen/arch/ia64/xen/vhpt.c Mon Jun 29 11:23:53 2009 +0900 9.2 +++ b/xen/arch/ia64/xen/vhpt.c Mon Jun 29 11:26:05 2009 +0900 9.3 @@ -231,7 +231,7 @@ domain_purge_swtc_entries_vcpu_dirty_mas 9.4 { 9.5 int vcpu; 9.6 9.7 - for_each_vcpu_mask(vcpu, vcpu_dirty_mask) { 9.8 + for_each_vcpu_mask(d, vcpu, vcpu_dirty_mask) { 9.9 struct vcpu* v = d->vcpu[vcpu]; 9.10 if (!v->is_initialised) 9.11 continue; 9.12 @@ -445,7 +445,7 @@ void 9.13 } 9.14 9.15 if (HAS_PERVCPU_VHPT(d)) { 9.16 - for_each_vcpu_mask(vcpu, entry->vcpu_dirty_mask) { 9.17 + for_each_vcpu_mask(d, vcpu, entry->vcpu_dirty_mask) { 9.18 v = d->vcpu[vcpu]; 9.19 if (!v->is_initialised) 9.20 continue;
10.1 --- a/xen/include/asm-ia64/vcpumask.h Mon Jun 29 11:23:53 2009 +0900 10.2 +++ b/xen/include/asm-ia64/vcpumask.h Mon Jun 29 11:26:05 2009 +0900 10.3 @@ -31,12 +31,12 @@ static inline int __next_vcpu(int n, con 10.4 } 10.5 10.6 #if MAX_VIRT_CPUS > 1 10.7 -#define for_each_vcpu_mask(vcpu, mask) \ 10.8 +#define for_each_vcpu_mask(d, vcpu, mask) \ 10.9 for ((vcpu) = first_vcpu(mask); \ 10.10 - (vcpu) < MAX_VIRT_CPUS; \ 10.11 + (vcpu) < d->max_vcpus; \ 10.12 (vcpu) = next_vcpu((vcpu), (mask))) 10.13 #else /* NR_CPUS == 1 */ 10.14 -#define for_each_vcpu_mask(vcpu, mask) for ((vcpu) = 0; (vcpu) < 1; (vcpu)++) 10.15 +#define for_each_vcpu_mask(d, vcpu, mask) for ((vcpu) = 0; (vcpu) < 1; (vcpu)++) 10.16 #endif /* NR_CPUS */ 10.17 10.18 #define vcpumask_scnprintf(buf, len, src) \