ia64/xen-unstable
changeset 17211:af33f2054f47
x86: Allow bitop functions to be applied only to fields of at least 4
bytes. Otherwise the 'longword' processor instructions used will
overlap with adjacent fields with unpredictable consequences.
This change requires some code fixup and just a few casts (mainly when
operating on guest-shared fields which cannot be changed, and which by
observation are clearly safe).
Based on ideas from Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
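For context, a minimal user-space sketch (not part of this changeset) of the hazard the message describes: the longword btsl-family instructions always read and modify a full 32-bit word, so pointing them at a 2-byte field also rewrites whatever is stored immediately after it. The struct and helper below are hypothetical and exist only to demonstrate the overlap; they are not Xen code.

/*
 * Hedged illustration, not from this changeset: why a longword bitop on a
 * field smaller than 4 bytes corrupts its neighbour.  Build on x86/x86-64
 * with GCC: gcc -O2 overlap.c && ./a.out
 */
#include <stdint.h>
#include <stdio.h>

struct two_halves {
    uint16_t flags;      /* intended bitmap, only 2 bytes wide      */
    uint16_t neighbour;  /* unrelated field stored right behind it  */
};

/* Same kind of instruction the Xen bitops emit: a 32-bit bit-set. */
static inline void set_bit32(int nr, volatile void *addr)
{
    asm volatile ( "lock btsl %1, %0"
                   : "+m" (*(volatile uint32_t *)addr)
                   : "Ir" (nr) : "memory" );
}

int main(void)
{
    struct two_halves s = { .flags = 0, .neighbour = 0 };

    /* Bit 20 lies beyond the 16 bits of 'flags', so the btsl lands in
     * 'neighbour' instead (bit 4 of it, i.e. 0x10). */
    set_bit32(20, &s.flags);

    printf("flags=%#x neighbour=%#x\n", s.flags, s.neighbour);
    return 0;
}

With the bitop_bad_size() check added to xen/include/asm-x86/bitops.h in this changeset, handing a pointer like &s.flags to set_bit() would no longer link.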
line diff
1.1 --- a/xen/arch/x86/domain.c Mon Mar 10 22:51:57 2008 +0000 1.2 +++ b/xen/arch/x86/domain.c Sun Mar 16 14:11:34 2008 +0000 1.3 @@ -830,7 +830,7 @@ unmap_vcpu_info(struct vcpu *v) 1.4 mfn = v->arch.vcpu_info_mfn; 1.5 unmap_domain_page_global(v->vcpu_info); 1.6 1.7 - v->vcpu_info = shared_info_addr(d, vcpu_info[v->vcpu_id]); 1.8 + v->vcpu_info = (void *)&shared_info(d, vcpu_info[v->vcpu_id]); 1.9 v->arch.vcpu_info_mfn = INVALID_MFN; 1.10 1.11 put_page_and_type(mfn_to_page(mfn)); 1.12 @@ -888,7 +888,7 @@ map_vcpu_info(struct vcpu *v, unsigned l 1.13 */ 1.14 vcpu_info(v, evtchn_upcall_pending) = 1; 1.15 for ( i = 0; i < BITS_PER_GUEST_LONG(d); i++ ) 1.16 - set_bit(i, vcpu_info_addr(v, evtchn_pending_sel)); 1.17 + set_bit(i, &vcpu_info(v, evtchn_pending_sel)); 1.18 1.19 /* 1.20 * Only bother to update time for the current vcpu. If we're
2.1 --- a/xen/arch/x86/hvm/hvm.c Mon Mar 10 22:51:57 2008 +0000 2.2 +++ b/xen/arch/x86/hvm/hvm.c Sun Mar 16 14:11:34 2008 +0000 2.3 @@ -59,8 +59,8 @@ integer_param("hvm_debug", opt_hvm_debug 2.4 struct hvm_function_table hvm_funcs __read_mostly; 2.5 2.6 /* I/O permission bitmap is globally shared by all HVM guests. */ 2.7 -char __attribute__ ((__section__ (".bss.page_aligned"))) 2.8 - hvm_io_bitmap[3*PAGE_SIZE]; 2.9 +unsigned long __attribute__ ((__section__ (".bss.page_aligned"))) 2.10 + hvm_io_bitmap[3*PAGE_SIZE/BYTES_PER_LONG]; 2.11 2.12 void hvm_enable(struct hvm_function_table *fns) 2.13 {
3.1 --- a/xen/arch/x86/hvm/svm/vmcb.c Mon Mar 10 22:51:57 2008 +0000 3.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c Sun Mar 16 14:11:34 2008 +0000 3.3 @@ -80,27 +80,27 @@ struct host_save_area *alloc_host_save_a 3.4 3.5 void svm_disable_intercept_for_msr(struct vcpu *v, u32 msr) 3.6 { 3.7 - char *msr_bitmap = v->arch.hvm_svm.msrpm; 3.8 + unsigned long *msr_bitmap = v->arch.hvm_svm.msrpm; 3.9 3.10 /* 3.11 * See AMD64 Programmers Manual, Vol 2, Section 15.10 (MSR-Bitmap Address). 3.12 */ 3.13 if ( msr <= 0x1fff ) 3.14 { 3.15 - __clear_bit(msr*2, msr_bitmap + 0x000); 3.16 - __clear_bit(msr*2+1, msr_bitmap + 0x000); 3.17 + __clear_bit(msr*2, msr_bitmap + 0x000/BYTES_PER_LONG); 3.18 + __clear_bit(msr*2+1, msr_bitmap + 0x000/BYTES_PER_LONG); 3.19 } 3.20 else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) 3.21 { 3.22 msr &= 0x1fff; 3.23 - __clear_bit(msr*2, msr_bitmap + 0x800); 3.24 - __clear_bit(msr*2+1, msr_bitmap + 0x800); 3.25 + __clear_bit(msr*2, msr_bitmap + 0x800/BYTES_PER_LONG); 3.26 + __clear_bit(msr*2+1, msr_bitmap + 0x800/BYTES_PER_LONG); 3.27 } 3.28 else if ( (msr >= 0xc001000) && (msr <= 0xc0011fff) ) 3.29 { 3.30 msr &= 0x1fff; 3.31 - __clear_bit(msr*2, msr_bitmap + 0x1000); 3.32 - __clear_bit(msr*2+1, msr_bitmap + 0x1000); 3.33 + __clear_bit(msr*2, msr_bitmap + 0x1000/BYTES_PER_LONG); 3.34 + __clear_bit(msr*2+1, msr_bitmap + 0x1000/BYTES_PER_LONG); 3.35 } 3.36 } 3.37
4.1 --- a/xen/arch/x86/hvm/vlapic.c Mon Mar 10 22:51:57 2008 +0000 4.2 +++ b/xen/arch/x86/hvm/vlapic.c Sun Mar 16 14:11:34 2008 +0000 4.3 @@ -83,15 +83,17 @@ static unsigned int vlapic_lvt_mask[VLAP 4.4 */ 4.5 4.6 #define VEC_POS(v) ((v)%32) 4.7 -#define REG_POS(v) (((v)/32)* 0x10) 4.8 -#define vlapic_test_and_set_vector(vec, bitmap) \ 4.9 - test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)) 4.10 -#define vlapic_test_and_clear_vector(vec, bitmap) \ 4.11 - test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)) 4.12 -#define vlapic_set_vector(vec, bitmap) \ 4.13 - set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)) 4.14 -#define vlapic_clear_vector(vec, bitmap) \ 4.15 - clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)) 4.16 +#define REG_POS(v) (((v)/32) * 0x10) 4.17 +#define vlapic_test_and_set_vector(vec, bitmap) \ 4.18 + test_and_set_bit(VEC_POS(vec), \ 4.19 + (unsigned long *)((bitmap) + REG_POS(vec))) 4.20 +#define vlapic_test_and_clear_vector(vec, bitmap) \ 4.21 + test_and_clear_bit(VEC_POS(vec), \ 4.22 + (unsigned long *)((bitmap) + REG_POS(vec))) 4.23 +#define vlapic_set_vector(vec, bitmap) \ 4.24 + set_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec))) 4.25 +#define vlapic_clear_vector(vec, bitmap) \ 4.26 + clear_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec))) 4.27 4.28 static int vlapic_find_highest_vector(void *bitmap) 4.29 { 4.30 @@ -112,12 +114,14 @@ static int vlapic_find_highest_vector(vo 4.31 4.32 static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic) 4.33 { 4.34 - return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]); 4.35 + return vlapic_test_and_set_vector( 4.36 + vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]); 4.37 } 4.38 4.39 static void vlapic_clear_irr(int vector, struct vlapic *vlapic) 4.40 { 4.41 - vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]); 4.42 + vlapic_clear_vector( 4.43 + vector, (unsigned long *)&vlapic->regs->data[APIC_IRR]); 4.44 } 4.45 4.46 static int vlapic_find_highest_irr(struct vlapic *vlapic)
5.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c Mon Mar 10 22:51:57 2008 +0000 5.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c Sun Mar 16 14:11:34 2008 +0000 5.3 @@ -413,7 +413,7 @@ static void vmx_set_host_env(struct vcpu 5.4 5.5 void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr) 5.6 { 5.7 - char *msr_bitmap = v->arch.hvm_vmx.msr_bitmap; 5.8 + unsigned long *msr_bitmap = v->arch.hvm_vmx.msr_bitmap; 5.9 5.10 /* VMX MSR bitmap supported? */ 5.11 if ( msr_bitmap == NULL ) 5.12 @@ -426,14 +426,14 @@ void vmx_disable_intercept_for_msr(struc 5.13 */ 5.14 if ( msr <= 0x1fff ) 5.15 { 5.16 - __clear_bit(msr, msr_bitmap + 0x000); /* read-low */ 5.17 - __clear_bit(msr, msr_bitmap + 0x800); /* write-low */ 5.18 + __clear_bit(msr, msr_bitmap + 0x000/BYTES_PER_LONG); /* read-low */ 5.19 + __clear_bit(msr, msr_bitmap + 0x800/BYTES_PER_LONG); /* write-low */ 5.20 } 5.21 else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) 5.22 { 5.23 msr &= 0x1fff; 5.24 - __clear_bit(msr, msr_bitmap + 0x400); /* read-high */ 5.25 - __clear_bit(msr, msr_bitmap + 0xc00); /* write-high */ 5.26 + __clear_bit(msr, msr_bitmap + 0x400/BYTES_PER_LONG); /* read-high */ 5.27 + __clear_bit(msr, msr_bitmap + 0xc00/BYTES_PER_LONG); /* write-high */ 5.28 } 5.29 } 5.30 5.31 @@ -456,7 +456,7 @@ static int construct_vmcs(struct vcpu *v 5.32 /* MSR access bitmap. */ 5.33 if ( cpu_has_vmx_msr_bitmap ) 5.34 { 5.35 - char *msr_bitmap = alloc_xenheap_page(); 5.36 + unsigned long *msr_bitmap = alloc_xenheap_page(); 5.37 5.38 if ( msr_bitmap == NULL ) 5.39 return -ENOMEM;
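A note on the offset arithmetic in the two MSR-bitmap hunks above (my reading, not text from the patch): offsets such as 0x400, 0x800 and 0xc00 are byte offsets into the bitmap page. Once msr_bitmap becomes an unsigned long *, plain pointer addition would scale them by sizeof(unsigned long), so the patch divides by BYTES_PER_LONG to keep addressing the same byte. A standalone sketch of the equivalence, assuming BYTES_PER_LONG is sizeof(unsigned long):

#include <assert.h>

#define BYTES_PER_LONG sizeof(unsigned long)   /* 8 on x86-64, 4 on x86-32 */

int main(void)
{
    static unsigned long page[4096 / sizeof(unsigned long)];

    char          *as_bytes = (char *)page;
    unsigned long *as_longs = page;

    /* Old form: (char *)msr_bitmap + 0x800
     * New form: (unsigned long *)msr_bitmap + 0x800/BYTES_PER_LONG
     * Both resolve to the same address; this only works because the
     * offsets used (0x400, 0x800, 0xc00, 0x1000) are multiples of
     * BYTES_PER_LONG. */
    assert((void *)(as_bytes + 0x800) ==
           (void *)(as_longs + 0x800 / BYTES_PER_LONG));
    return 0;
}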
6.1 --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c Mon Mar 10 22:51:57 2008 +0000 6.2 +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c Sun Mar 16 14:11:34 2008 +0000 6.3 @@ -101,7 +101,7 @@ static int is_core2_vpmu_msr(u32 msr_ind 6.4 return 0; 6.5 } 6.6 6.7 -static void core2_vpmu_set_msr_bitmap(char *msr_bitmap) 6.8 +static void core2_vpmu_set_msr_bitmap(unsigned long *msr_bitmap) 6.9 { 6.10 int i; 6.11 6.12 @@ -109,12 +109,14 @@ static void core2_vpmu_set_msr_bitmap(ch 6.13 for ( i = 0; i < core2_counters.num; i++ ) 6.14 { 6.15 clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap); 6.16 - clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap+0x800); 6.17 + clear_bit(msraddr_to_bitpos(core2_counters.msr[i]), 6.18 + msr_bitmap + 0x800/BYTES_PER_LONG); 6.19 } 6.20 for ( i = 0; i < core2_get_pmc_count(); i++ ) 6.21 { 6.22 clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap); 6.23 - clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap+0x800); 6.24 + clear_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), 6.25 + msr_bitmap + 0x800/BYTES_PER_LONG); 6.26 } 6.27 6.28 /* Allow Read PMU Non-global Controls Directly. */ 6.29 @@ -124,19 +126,21 @@ static void core2_vpmu_set_msr_bitmap(ch 6.30 clear_bit(msraddr_to_bitpos(MSR_P6_EVNTSEL0+i), msr_bitmap); 6.31 } 6.32 6.33 -static void core2_vpmu_unset_msr_bitmap(char *msr_bitmap) 6.34 +static void core2_vpmu_unset_msr_bitmap(unsigned long *msr_bitmap) 6.35 { 6.36 int i; 6.37 6.38 for ( i = 0; i < core2_counters.num; i++ ) 6.39 { 6.40 set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap); 6.41 - set_bit(msraddr_to_bitpos(core2_counters.msr[i]), msr_bitmap+0x800); 6.42 + set_bit(msraddr_to_bitpos(core2_counters.msr[i]), 6.43 + msr_bitmap + 0x800/BYTES_PER_LONG); 6.44 } 6.45 for ( i = 0; i < core2_get_pmc_count(); i++ ) 6.46 { 6.47 set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap); 6.48 - set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), msr_bitmap+0x800); 6.49 + set_bit(msraddr_to_bitpos(MSR_IA32_PERFCTR0+i), 6.50 + msr_bitmap + 0x800/BYTES_PER_LONG); 6.51 } 6.52 for ( i = 0; i < core2_ctrls.num; i++ ) 6.53 set_bit(msraddr_to_bitpos(core2_ctrls.msr[i]), msr_bitmap);
7.1 --- a/xen/arch/x86/irq.c Mon Mar 10 22:51:57 2008 +0000 7.2 +++ b/xen/arch/x86/irq.c Sun Mar 16 14:11:34 2008 +0000 7.3 @@ -362,13 +362,12 @@ int pirq_guest_eoi(struct domain *d, int 7.4 int pirq_guest_unmask(struct domain *d) 7.5 { 7.6 unsigned int irq; 7.7 - shared_info_t *s = d->shared_info; 7.8 7.9 for ( irq = find_first_bit(d->pirq_mask, NR_IRQS); 7.10 irq < NR_IRQS; 7.11 irq = find_next_bit(d->pirq_mask, NR_IRQS, irq+1) ) 7.12 { 7.13 - if ( !test_bit(d->pirq_to_evtchn[irq], __shared_info_addr(d, s, evtchn_mask)) ) 7.14 + if ( !test_bit(d->pirq_to_evtchn[irq], &shared_info(d, evtchn_mask)) ) 7.15 __pirq_guest_eoi(d, irq); 7.16 } 7.17 7.18 @@ -660,13 +659,13 @@ static void dump_irqs(unsigned char key) 7.19 printk("%u(%c%c%c%c)", 7.20 d->domain_id, 7.21 (test_bit(d->pirq_to_evtchn[irq], 7.22 - shared_info_addr(d, evtchn_pending)) ? 7.23 + &shared_info(d, evtchn_pending)) ? 7.24 'P' : '-'), 7.25 (test_bit(d->pirq_to_evtchn[irq]/BITS_PER_GUEST_LONG(d), 7.26 - vcpu_info_addr(d->vcpu[0], evtchn_pending_sel)) ? 7.27 + &vcpu_info(d->vcpu[0], evtchn_pending_sel)) ? 7.28 'S' : '-'), 7.29 (test_bit(d->pirq_to_evtchn[irq], 7.30 - shared_info_addr(d, evtchn_mask)) ? 7.31 + &shared_info(d, evtchn_mask)) ? 7.32 'M' : '-'), 7.33 (test_bit(irq, d->pirq_mask) ? 7.34 'M' : '-'));
8.1 --- a/xen/arch/x86/mm/paging.c Mon Mar 10 22:51:57 2008 +0000 8.2 +++ b/xen/arch/x86/mm/paging.c Sun Mar 16 14:11:34 2008 +0000 8.3 @@ -114,7 +114,8 @@ static mfn_t paging_new_log_dirty_page(s 8.4 return mfn; 8.5 } 8.6 8.7 -static mfn_t paging_new_log_dirty_leaf(struct domain *d, uint8_t **leaf_p) 8.8 +static mfn_t paging_new_log_dirty_leaf( 8.9 + struct domain *d, unsigned long **leaf_p) 8.10 { 8.11 mfn_t mfn = paging_new_log_dirty_page(d, (void **)leaf_p); 8.12 if ( mfn_valid(mfn) ) 8.13 @@ -264,7 +265,7 @@ void paging_mark_dirty(struct domain *d, 8.14 mfn_t gmfn; 8.15 int changed; 8.16 mfn_t mfn, *l4, *l3, *l2; 8.17 - uint8_t *l1; 8.18 + unsigned long *l1; 8.19 int i1, i2, i3, i4; 8.20 8.21 gmfn = _mfn(guest_mfn); 8.22 @@ -341,7 +342,7 @@ int paging_log_dirty_op(struct domain *d 8.23 int rv = 0, clean = 0, peek = 1; 8.24 unsigned long pages = 0; 8.25 mfn_t *l4, *l3, *l2; 8.26 - uint8_t *l1; 8.27 + unsigned long *l1; 8.28 int i4, i3, i2; 8.29 8.30 domain_pause(d); 8.31 @@ -399,7 +400,7 @@ int paging_log_dirty_op(struct domain *d 8.32 (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES); 8.33 i2++ ) 8.34 { 8.35 - static uint8_t zeroes[PAGE_SIZE]; 8.36 + static unsigned long zeroes[PAGE_SIZE/BYTES_PER_LONG]; 8.37 unsigned int bytes = PAGE_SIZE; 8.38 l1 = ((l2 && mfn_valid(l2[i2])) ? 8.39 map_domain_page(mfn_x(l2[i2])) : zeroes); 8.40 @@ -408,7 +409,7 @@ int paging_log_dirty_op(struct domain *d 8.41 if ( likely(peek) ) 8.42 { 8.43 if ( copy_to_guest_offset(sc->dirty_bitmap, pages >> 3, 8.44 - l1, bytes) != 0 ) 8.45 + (uint8_t *)l1, bytes) != 0 ) 8.46 { 8.47 rv = -EFAULT; 8.48 goto out;
9.1 --- a/xen/arch/x86/mm/shadow/private.h Mon Mar 10 22:51:57 2008 +0000 9.2 +++ b/xen/arch/x86/mm/shadow/private.h Sun Mar 16 14:11:34 2008 +0000 9.3 @@ -483,7 +483,7 @@ sh_mfn_is_dirty(struct domain *d, mfn_t 9.4 { 9.5 unsigned long pfn; 9.6 mfn_t mfn, *l4, *l3, *l2; 9.7 - uint8_t *l1; 9.8 + unsigned long *l1; 9.9 int rv; 9.10 9.11 ASSERT(shadow_mode_log_dirty(d));
10.1 --- a/xen/common/domain.c Mon Mar 10 22:51:57 2008 +0000 10.2 +++ b/xen/common/domain.c Sun Mar 16 14:11:34 2008 +0000 10.3 @@ -154,7 +154,7 @@ struct vcpu *alloc_vcpu( 10.4 if ( !is_idle_domain(d) ) 10.5 { 10.6 set_bit(_VPF_down, &v->pause_flags); 10.7 - v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]); 10.8 + v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]); 10.9 } 10.10 10.11 if ( sched_init_vcpu(v, cpu_id) != 0 )
11.1 --- a/xen/common/event_channel.c Mon Mar 10 22:51:57 2008 +0000 11.2 +++ b/xen/common/event_channel.c Sun Mar 16 14:11:34 2008 +0000 11.3 @@ -539,7 +539,6 @@ out: 11.4 void evtchn_set_pending(struct vcpu *v, int port) 11.5 { 11.6 struct domain *d = v->domain; 11.7 - shared_info_t *s = d->shared_info; 11.8 11.9 /* 11.10 * The following bit operations must happen in strict order. 11.11 @@ -548,12 +547,12 @@ void evtchn_set_pending(struct vcpu *v, 11.12 * others may require explicit memory barriers. 11.13 */ 11.14 11.15 - if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) ) 11.16 + if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) ) 11.17 return; 11.18 11.19 - if ( !test_bit (port, __shared_info_addr(d, s, evtchn_mask)) && 11.20 + if ( !test_bit (port, &shared_info(d, evtchn_mask)) && 11.21 !test_and_set_bit(port / BITS_PER_GUEST_LONG(d), 11.22 - vcpu_info_addr(v, evtchn_pending_sel)) ) 11.23 + &vcpu_info(v, evtchn_pending_sel)) ) 11.24 { 11.25 vcpu_mark_events_pending(v); 11.26 } 11.27 @@ -750,7 +749,6 @@ long evtchn_bind_vcpu(unsigned int port, 11.28 static long evtchn_unmask(evtchn_unmask_t *unmask) 11.29 { 11.30 struct domain *d = current->domain; 11.31 - shared_info_t *s = d->shared_info; 11.32 int port = unmask->port; 11.33 struct vcpu *v; 11.34 11.35 @@ -768,10 +766,10 @@ static long evtchn_unmask(evtchn_unmask_ 11.36 * These operations must happen in strict order. Based on 11.37 * include/xen/event.h:evtchn_set_pending(). 11.38 */ 11.39 - if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) && 11.40 - test_bit (port, __shared_info_addr(d, s, evtchn_pending)) && 11.41 + if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) && 11.42 + test_bit (port, &shared_info(d, evtchn_pending)) && 11.43 !test_and_set_bit (port / BITS_PER_GUEST_LONG(d), 11.44 - vcpu_info_addr(v, evtchn_pending_sel)) ) 11.45 + &vcpu_info(v, evtchn_pending_sel)) ) 11.46 { 11.47 vcpu_mark_events_pending(v); 11.48 }
12.1 --- a/xen/common/keyhandler.c Mon Mar 10 22:51:57 2008 +0000 12.2 +++ b/xen/common/keyhandler.c Sun Mar 16 14:11:34 2008 +0000 12.3 @@ -201,12 +201,12 @@ static void dump_domains(unsigned char k 12.4 printk(" Notifying guest (virq %d, port %d, stat %d/%d/%d)\n", 12.5 VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG], 12.6 test_bit(v->virq_to_evtchn[VIRQ_DEBUG], 12.7 - shared_info_addr(d, evtchn_pending)), 12.8 + &shared_info(d, evtchn_pending)), 12.9 test_bit(v->virq_to_evtchn[VIRQ_DEBUG], 12.10 - shared_info_addr(d, evtchn_mask)), 12.11 + &shared_info(d, evtchn_mask)), 12.12 test_bit(v->virq_to_evtchn[VIRQ_DEBUG] / 12.13 BITS_PER_GUEST_LONG(d), 12.14 - vcpu_info_addr(v, evtchn_pending_sel))); 12.15 + &vcpu_info(v, evtchn_pending_sel))); 12.16 send_guest_vcpu_virq(v, VIRQ_DEBUG); 12.17 } 12.18 }
13.1 --- a/xen/common/schedule.c Mon Mar 10 22:51:57 2008 +0000 13.2 +++ b/xen/common/schedule.c Sun Mar 16 14:11:34 2008 +0000 13.3 @@ -365,7 +365,7 @@ static long do_poll(struct sched_poll *s 13.4 goto out; 13.5 13.6 rc = 0; 13.7 - if ( test_bit(port, shared_info_addr(d, evtchn_pending)) ) 13.8 + if ( test_bit(port, &shared_info(d, evtchn_pending)) ) 13.9 goto out; 13.10 } 13.11
14.1 --- a/xen/drivers/passthrough/vtd/iommu.c Mon Mar 10 22:51:57 2008 +0000 14.2 +++ b/xen/drivers/passthrough/vtd/iommu.c Sun Mar 16 14:11:34 2008 +0000 14.3 @@ -39,8 +39,8 @@ 14.4 #define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid) 14.5 14.6 static spinlock_t domid_bitmap_lock; /* protect domain id bitmap */ 14.7 -static int domid_bitmap_size; /* domain id bitmap size in bit */ 14.8 -static void *domid_bitmap; /* iommu domain id bitmap */ 14.9 +static int domid_bitmap_size; /* domain id bitmap size in bits */ 14.10 +static unsigned long *domid_bitmap; /* iommu domain id bitmap */ 14.11 14.12 #define DID_FIELD_WIDTH 16 14.13 #define DID_HIGH_OFFSET 8 14.14 @@ -1885,7 +1885,8 @@ int iommu_setup(void) 14.15 14.16 /* Allocate domain id bitmap, and set bit 0 as reserved */ 14.17 domid_bitmap_size = cap_ndoms(iommu->cap); 14.18 - domid_bitmap = xmalloc_bytes(domid_bitmap_size / 8); 14.19 + domid_bitmap = xmalloc_array(unsigned long, 14.20 + BITS_TO_LONGS(domid_bitmap_size)); 14.21 if ( domid_bitmap == NULL ) 14.22 goto error; 14.23 memset(domid_bitmap, 0, domid_bitmap_size / 8);
15.1 --- a/xen/drivers/video/vesa.c Mon Mar 10 22:51:57 2008 +0000 15.2 +++ b/xen/drivers/video/vesa.c Sun Mar 16 14:11:34 2008 +0000 15.3 @@ -219,7 +219,7 @@ static void vesa_show_line( 15.4 ((font->width + 7) >> 3)); 15.5 for ( b = font->width; b--; ) 15.6 { 15.7 - pixel = test_bit(b, bits) ? pixel_on : 0; 15.8 + pixel = (*bits & (1u<<b)) ? pixel_on : 0; 15.9 memcpy(ptr, &pixel, bpp); 15.10 ptr += bpp; 15.11 }
16.1 --- a/xen/include/asm-x86/bitops.h Mon Mar 10 22:51:57 2008 +0000 16.2 +++ b/xen/include/asm-x86/bitops.h Sun Mar 16 14:11:34 2008 +0000 16.3 @@ -25,6 +25,9 @@ 16.4 #define ADDR (*(volatile long *) addr) 16.5 #define CONST_ADDR (*(const volatile long *) addr) 16.6 16.7 +extern void __bitop_bad_size(void); 16.8 +#define bitop_bad_size(addr) (min(sizeof(*(addr)), __alignof__(*(addr))) < 4) 16.9 + 16.10 /** 16.11 * set_bit - Atomically set a bit in memory 16.12 * @nr: the bit to set 16.13 @@ -35,13 +38,18 @@ 16.14 * Note that @nr may be almost arbitrarily large; this function is not 16.15 * restricted to acting on a single-word quantity. 16.16 */ 16.17 -static __inline__ void set_bit(int nr, volatile void * addr) 16.18 +static inline void set_bit(int nr, volatile void *addr) 16.19 { 16.20 - __asm__ __volatile__( LOCK_PREFIX 16.21 - "btsl %1,%0" 16.22 - :"=m" (ADDR) 16.23 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.24 + asm volatile ( 16.25 + LOCK_PREFIX 16.26 + "btsl %1,%0" 16.27 + : "=m" (ADDR) 16.28 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.29 } 16.30 +#define set_bit(nr, addr) ({ \ 16.31 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.32 + set_bit(nr, addr); \ 16.33 +}) 16.34 16.35 /** 16.36 * __set_bit - Set a bit in memory 16.37 @@ -52,13 +60,17 @@ static __inline__ void set_bit(int nr, v 16.38 * If it's called on the same region of memory simultaneously, the effect 16.39 * may be that only one operation succeeds. 16.40 */ 16.41 -static __inline__ void __set_bit(int nr, volatile void * addr) 16.42 +static inline void __set_bit(int nr, volatile void *addr) 16.43 { 16.44 - __asm__( 16.45 - "btsl %1,%0" 16.46 - :"=m" (ADDR) 16.47 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.48 + asm volatile ( 16.49 + "btsl %1,%0" 16.50 + : "=m" (ADDR) 16.51 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.52 } 16.53 +#define __set_bit(nr, addr) ({ \ 16.54 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.55 + __set_bit(nr, addr); \ 16.56 +}) 16.57 16.58 /** 16.59 * clear_bit - Clears a bit in memory 16.60 @@ -70,13 +82,18 @@ static __inline__ void __set_bit(int nr, 16.61 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() 16.62 * in order to ensure changes are visible on other processors. 16.63 */ 16.64 -static __inline__ void clear_bit(int nr, volatile void * addr) 16.65 +static inline void clear_bit(int nr, volatile void *addr) 16.66 { 16.67 - __asm__ __volatile__( LOCK_PREFIX 16.68 - "btrl %1,%0" 16.69 - :"=m" (ADDR) 16.70 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.71 + asm volatile ( 16.72 + LOCK_PREFIX 16.73 + "btrl %1,%0" 16.74 + : "=m" (ADDR) 16.75 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.76 } 16.77 +#define clear_bit(nr, addr) ({ \ 16.78 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.79 + clear_bit(nr, addr); \ 16.80 +}) 16.81 16.82 /** 16.83 * __clear_bit - Clears a bit in memory 16.84 @@ -87,16 +104,20 @@ static __inline__ void clear_bit(int nr, 16.85 * If it's called on the same region of memory simultaneously, the effect 16.86 * may be that only one operation succeeds. 
16.87 */ 16.88 -static __inline__ void __clear_bit(int nr, volatile void * addr) 16.89 +static inline void __clear_bit(int nr, volatile void *addr) 16.90 { 16.91 - __asm__( 16.92 - "btrl %1,%0" 16.93 - :"=m" (ADDR) 16.94 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.95 + asm volatile ( 16.96 + "btrl %1,%0" 16.97 + : "=m" (ADDR) 16.98 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.99 } 16.100 +#define __clear_bit(nr, addr) ({ \ 16.101 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.102 + __clear_bit(nr, addr); \ 16.103 +}) 16.104 16.105 -#define smp_mb__before_clear_bit() barrier() 16.106 -#define smp_mb__after_clear_bit() barrier() 16.107 +#define smp_mb__before_clear_bit() barrier() 16.108 +#define smp_mb__after_clear_bit() barrier() 16.109 16.110 /** 16.111 * __change_bit - Toggle a bit in memory 16.112 @@ -107,13 +128,17 @@ static __inline__ void __clear_bit(int n 16.113 * If it's called on the same region of memory simultaneously, the effect 16.114 * may be that only one operation succeeds. 16.115 */ 16.116 -static __inline__ void __change_bit(int nr, volatile void * addr) 16.117 +static inline void __change_bit(int nr, volatile void *addr) 16.118 { 16.119 - __asm__ __volatile__( 16.120 - "btcl %1,%0" 16.121 - :"=m" (ADDR) 16.122 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.123 + asm volatile ( 16.124 + "btcl %1,%0" 16.125 + : "=m" (ADDR) 16.126 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.127 } 16.128 +#define __change_bit(nr, addr) ({ \ 16.129 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.130 + __change_bit(nr, addr); \ 16.131 +}) 16.132 16.133 /** 16.134 * change_bit - Toggle a bit in memory 16.135 @@ -124,13 +149,18 @@ static __inline__ void __change_bit(int 16.136 * Note that @nr may be almost arbitrarily large; this function is not 16.137 * restricted to acting on a single-word quantity. 16.138 */ 16.139 -static __inline__ void change_bit(int nr, volatile void * addr) 16.140 +static inline void change_bit(int nr, volatile void *addr) 16.141 { 16.142 - __asm__ __volatile__( LOCK_PREFIX 16.143 - "btcl %1,%0" 16.144 - :"=m" (ADDR) 16.145 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.146 + asm volatile ( 16.147 + LOCK_PREFIX 16.148 + "btcl %1,%0" 16.149 + : "=m" (ADDR) 16.150 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.151 } 16.152 +#define change_bit(nr, addr) ({ \ 16.153 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.154 + change_bit(nr, addr); \ 16.155 +}) 16.156 16.157 /** 16.158 * test_and_set_bit - Set a bit and return its old value 16.159 @@ -140,16 +170,21 @@ static __inline__ void change_bit(int nr 16.160 * This operation is atomic and cannot be reordered. 16.161 * It also implies a memory barrier. 
16.162 */ 16.163 -static __inline__ int test_and_set_bit(int nr, volatile void * addr) 16.164 +static inline int test_and_set_bit(int nr, volatile void *addr) 16.165 { 16.166 - int oldbit; 16.167 + int oldbit; 16.168 16.169 - __asm__ __volatile__( LOCK_PREFIX 16.170 - "btsl %2,%1\n\tsbbl %0,%0" 16.171 - :"=r" (oldbit),"=m" (ADDR) 16.172 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.173 - return oldbit; 16.174 + asm volatile ( 16.175 + LOCK_PREFIX 16.176 + "btsl %2,%1\n\tsbbl %0,%0" 16.177 + : "=r" (oldbit), "=m" (ADDR) 16.178 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.179 + return oldbit; 16.180 } 16.181 +#define test_and_set_bit(nr, addr) ({ \ 16.182 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.183 + test_and_set_bit(nr, addr); \ 16.184 +}) 16.185 16.186 /** 16.187 * __test_and_set_bit - Set a bit and return its old value 16.188 @@ -160,16 +195,20 @@ static __inline__ int test_and_set_bit(i 16.189 * If two examples of this operation race, one can appear to succeed 16.190 * but actually fail. You must protect multiple accesses with a lock. 16.191 */ 16.192 -static __inline__ int __test_and_set_bit(int nr, volatile void * addr) 16.193 +static inline int __test_and_set_bit(int nr, volatile void *addr) 16.194 { 16.195 - int oldbit; 16.196 + int oldbit; 16.197 16.198 - __asm__ __volatile__( 16.199 - "btsl %2,%1\n\tsbbl %0,%0" 16.200 - :"=r" (oldbit),"=m" (ADDR) 16.201 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.202 - return oldbit; 16.203 + asm volatile ( 16.204 + "btsl %2,%1\n\tsbbl %0,%0" 16.205 + : "=r" (oldbit), "=m" (ADDR) 16.206 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.207 + return oldbit; 16.208 } 16.209 +#define __test_and_set_bit(nr, addr) ({ \ 16.210 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.211 + __test_and_set_bit(nr, addr); \ 16.212 +}) 16.213 16.214 /** 16.215 * test_and_clear_bit - Clear a bit and return its old value 16.216 @@ -179,16 +218,21 @@ static __inline__ int __test_and_set_bit 16.217 * This operation is atomic and cannot be reordered. 16.218 * It also implies a memory barrier. 16.219 */ 16.220 -static __inline__ int test_and_clear_bit(int nr, volatile void * addr) 16.221 +static inline int test_and_clear_bit(int nr, volatile void *addr) 16.222 { 16.223 - int oldbit; 16.224 + int oldbit; 16.225 16.226 - __asm__ __volatile__( LOCK_PREFIX 16.227 - "btrl %2,%1\n\tsbbl %0,%0" 16.228 - :"=r" (oldbit),"=m" (ADDR) 16.229 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.230 - return oldbit; 16.231 + asm volatile ( 16.232 + LOCK_PREFIX 16.233 + "btrl %2,%1\n\tsbbl %0,%0" 16.234 + : "=r" (oldbit), "=m" (ADDR) 16.235 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.236 + return oldbit; 16.237 } 16.238 +#define test_and_clear_bit(nr, addr) ({ \ 16.239 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.240 + test_and_clear_bit(nr, addr); \ 16.241 +}) 16.242 16.243 /** 16.244 * __test_and_clear_bit - Clear a bit and return its old value 16.245 @@ -199,28 +243,36 @@ static __inline__ int test_and_clear_bit 16.246 * If two examples of this operation race, one can appear to succeed 16.247 * but actually fail. You must protect multiple accesses with a lock. 
16.248 */ 16.249 -static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) 16.250 +static inline int __test_and_clear_bit(int nr, volatile void *addr) 16.251 { 16.252 - int oldbit; 16.253 + int oldbit; 16.254 16.255 - __asm__ __volatile__( 16.256 - "btrl %2,%1\n\tsbbl %0,%0" 16.257 - :"=r" (oldbit),"=m" (ADDR) 16.258 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.259 - return oldbit; 16.260 + asm volatile ( 16.261 + "btrl %2,%1\n\tsbbl %0,%0" 16.262 + : "=r" (oldbit), "=m" (ADDR) 16.263 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.264 + return oldbit; 16.265 } 16.266 +#define __test_and_clear_bit(nr, addr) ({ \ 16.267 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.268 + __test_and_clear_bit(nr, addr); \ 16.269 +}) 16.270 16.271 /* WARNING: non atomic and it can be reordered! */ 16.272 -static __inline__ int __test_and_change_bit(int nr, volatile void * addr) 16.273 +static inline int __test_and_change_bit(int nr, volatile void *addr) 16.274 { 16.275 - int oldbit; 16.276 + int oldbit; 16.277 16.278 - __asm__ __volatile__( 16.279 - "btcl %2,%1\n\tsbbl %0,%0" 16.280 - :"=r" (oldbit),"=m" (ADDR) 16.281 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.282 - return oldbit; 16.283 + asm volatile ( 16.284 + "btcl %2,%1\n\tsbbl %0,%0" 16.285 + : "=r" (oldbit), "=m" (ADDR) 16.286 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.287 + return oldbit; 16.288 } 16.289 +#define __test_and_change_bit(nr, addr) ({ \ 16.290 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.291 + __test_and_change_bit(nr, addr); \ 16.292 +}) 16.293 16.294 /** 16.295 * test_and_change_bit - Change a bit and return its new value 16.296 @@ -230,38 +282,45 @@ static __inline__ int __test_and_change_ 16.297 * This operation is atomic and cannot be reordered. 16.298 * It also implies a memory barrier. 
16.299 */ 16.300 -static __inline__ int test_and_change_bit(int nr, volatile void * addr) 16.301 +static inline int test_and_change_bit(int nr, volatile void *addr) 16.302 { 16.303 - int oldbit; 16.304 + int oldbit; 16.305 16.306 - __asm__ __volatile__( LOCK_PREFIX 16.307 - "btcl %2,%1\n\tsbbl %0,%0" 16.308 - :"=r" (oldbit),"=m" (ADDR) 16.309 - :"dIr" (nr), "m" (ADDR) : "memory"); 16.310 - return oldbit; 16.311 + asm volatile ( 16.312 + LOCK_PREFIX 16.313 + "btcl %2,%1\n\tsbbl %0,%0" 16.314 + : "=r" (oldbit), "=m" (ADDR) 16.315 + : "Ir" (nr), "m" (ADDR) : "memory"); 16.316 + return oldbit; 16.317 +} 16.318 +#define test_and_change_bit(nr, addr) ({ \ 16.319 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.320 + test_and_change_bit(nr, addr); \ 16.321 +}) 16.322 + 16.323 +static inline int constant_test_bit(int nr, const volatile void *addr) 16.324 +{ 16.325 + return ((1U << (nr & 31)) & 16.326 + (((const volatile unsigned int *)addr)[nr >> 5])) != 0; 16.327 } 16.328 16.329 +static inline int variable_test_bit(int nr, const volatile void *addr) 16.330 +{ 16.331 + int oldbit; 16.332 16.333 -static __inline__ int constant_test_bit(int nr, const volatile void * addr) 16.334 -{ 16.335 - return ((1U << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0; 16.336 + asm volatile ( 16.337 + "btl %2,%1\n\tsbbl %0,%0" 16.338 + : "=r" (oldbit) 16.339 + : "m" (CONST_ADDR), "Ir" (nr) : "memory" ); 16.340 + return oldbit; 16.341 } 16.342 16.343 -static __inline__ int variable_test_bit(int nr, const volatile void * addr) 16.344 -{ 16.345 - int oldbit; 16.346 - 16.347 - __asm__ __volatile__( 16.348 - "btl %2,%1\n\tsbbl %0,%0" 16.349 - :"=r" (oldbit) 16.350 - :"m" (CONST_ADDR),"dIr" (nr)); 16.351 - return oldbit; 16.352 -} 16.353 - 16.354 -#define test_bit(nr,addr) \ 16.355 -(__builtin_constant_p(nr) ? \ 16.356 - constant_test_bit((nr),(addr)) : \ 16.357 - variable_test_bit((nr),(addr))) 16.358 +#define test_bit(nr, addr) ({ \ 16.359 + if ( bitop_bad_size(addr) ) __bitop_bad_size(); \ 16.360 + (__builtin_constant_p(nr) ? \ 16.361 + constant_test_bit((nr),(addr)) : \ 16.362 + variable_test_bit((nr),(addr))); \ 16.363 +}) 16.364 16.365 extern unsigned int __find_first_bit( 16.366 const unsigned long *addr, unsigned int size); 16.367 @@ -275,8 +334,8 @@ extern unsigned int __find_next_zero_bit 16.368 /* return index of first bit set in val or BITS_PER_LONG when no bit is set */ 16.369 static inline unsigned int __scanbit(unsigned long val) 16.370 { 16.371 - __asm__ ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) ); 16.372 - return (unsigned int)val; 16.373 + asm ( "bsf %1,%0" : "=r" (val) : "r" (val), "0" (BITS_PER_LONG) ); 16.374 + return (unsigned int)val; 16.375 } 16.376 16.377 /** 16.378 @@ -335,10 +394,10 @@ static inline unsigned int __scanbit(uns 16.379 * Returns the bit-number of the first set bit. If no bits are set then the 16.380 * result is undefined. 
16.381 */ 16.382 -static __inline__ unsigned int find_first_set_bit(unsigned long word) 16.383 +static inline unsigned int find_first_set_bit(unsigned long word) 16.384 { 16.385 - __asm__ ( "bsf %1,%0" : "=r" (word) : "r" (word) ); 16.386 - return (unsigned int)word; 16.387 + asm ( "bsf %1,%0" : "=r" (word) : "r" (word) ); 16.388 + return (unsigned int)word; 16.389 } 16.390 16.391 /** 16.392 @@ -349,10 +408,10 @@ static __inline__ unsigned int find_firs 16.393 */ 16.394 static inline unsigned long ffz(unsigned long word) 16.395 { 16.396 - __asm__("bsf %1,%0" 16.397 - :"=r" (word) 16.398 - :"r" (~word)); 16.399 - return word; 16.400 + asm ( "bsf %1,%0" 16.401 + :"=r" (word) 16.402 + :"r" (~word)); 16.403 + return word; 16.404 } 16.405 16.406 /** 16.407 @@ -365,13 +424,13 @@ static inline unsigned long ffz(unsigned 16.408 */ 16.409 static inline int ffs(unsigned long x) 16.410 { 16.411 - long r; 16.412 + long r; 16.413 16.414 - __asm__("bsf %1,%0\n\t" 16.415 - "jnz 1f\n\t" 16.416 - "mov $-1,%0\n" 16.417 - "1:" : "=r" (r) : "rm" (x)); 16.418 - return (int)r+1; 16.419 + asm ( "bsf %1,%0\n\t" 16.420 + "jnz 1f\n\t" 16.421 + "mov $-1,%0\n" 16.422 + "1:" : "=r" (r) : "rm" (x)); 16.423 + return (int)r+1; 16.424 } 16.425 16.426 /** 16.427 @@ -382,13 +441,13 @@ static inline int ffs(unsigned long x) 16.428 */ 16.429 static inline int fls(unsigned long x) 16.430 { 16.431 - long r; 16.432 + long r; 16.433 16.434 - __asm__("bsr %1,%0\n\t" 16.435 - "jnz 1f\n\t" 16.436 - "mov $-1,%0\n" 16.437 - "1:" : "=r" (r) : "rm" (x)); 16.438 - return (int)r+1; 16.439 + asm ( "bsr %1,%0\n\t" 16.440 + "jnz 1f\n\t" 16.441 + "mov $-1,%0\n" 16.442 + "1:" : "=r" (r) : "rm" (x)); 16.443 + return (int)r+1; 16.444 } 16.445 16.446 /**
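The bitops.h hunk above is where the size restriction is enforced. As I read the pattern (sketch below, not the real header): each inline function keeps its old name, and a function-like macro of the same name is defined right after it. Function-like macros do not re-expand their own name, so the set_bit(nr, addr) inside the macro body calls the inline function, while every caller first goes through the size check. bitop_bad_size() is a compile-time constant, so with the optimizer on the branch disappears for valid operands; for an operand narrower than 4 bytes it leaves a call to the never-defined __bitop_bad_size(), and the build fails at link time. The real check also consults __alignof__ via min(); the sketch uses sizeof alone.

extern void __bitop_bad_size(void);            /* deliberately never defined */

#define bitop_bad_size(addr) (sizeof(*(addr)) < 4)   /* simplified check */

static inline void set_bit(int nr, volatile void *addr)
{
    /* real btsl-based implementation elided for the sketch */
    (void)nr; (void)addr;
}

#define set_bit(nr, addr) ({                                         \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();                  \
    set_bit(nr, addr);      /* resolves to the inline function */    \
})

static unsigned long  ok_field;   /* >= 4 bytes: check folds away           */
static unsigned short bad_field;  /* 2 bytes: would leave an unresolved     */
                                  /* reference to __bitop_bad_size()        */
void demo(void)
{
    set_bit(0, &ok_field);        /* compiles and links                     */
    /* set_bit(0, &bad_field); */ /* uncommenting breaks the link, which    */
                                  /* is exactly the point of the patch      */
}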
17.1 --- a/xen/include/asm-x86/event.h Mon Mar 10 22:51:57 2008 +0000 17.2 +++ b/xen/include/asm-x86/event.h Sun Mar 16 14:11:34 2008 +0000 17.3 @@ -30,7 +30,10 @@ static inline void vcpu_kick(struct vcpu 17.4 17.5 static inline void vcpu_mark_events_pending(struct vcpu *v) 17.6 { 17.7 - if ( test_and_set_bit(0, &vcpu_info(v, evtchn_upcall_pending)) ) 17.8 + int already_pending = test_and_set_bit( 17.9 + 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending)); 17.10 + 17.11 + if ( already_pending ) 17.12 return; 17.13 17.14 if ( is_hvm_vcpu(v) )
18.1 --- a/xen/include/asm-x86/grant_table.h Mon Mar 10 22:51:57 2008 +0000 18.2 +++ b/xen/include/asm-x86/grant_table.h Sun Mar 16 14:11:34 2008 +0000 18.3 @@ -35,7 +35,7 @@ int replace_grant_host_mapping( 18.4 18.5 static inline void gnttab_clear_flag(unsigned long nr, uint16_t *addr) 18.6 { 18.7 - clear_bit(nr, addr); 18.8 + clear_bit(nr, (unsigned long *)addr); 18.9 } 18.10 18.11 /* Foreign mappings of HVM-guest pages do not modify the type count. */
19.1 --- a/xen/include/asm-x86/hvm/support.h Mon Mar 10 22:51:57 2008 +0000 19.2 +++ b/xen/include/asm-x86/hvm/support.h Sun Mar 16 14:11:34 2008 +0000 19.3 @@ -78,7 +78,7 @@ extern unsigned int opt_hvm_debug_level; 19.4 #define HVM_DBG_LOG(level, _f, _a...) 19.5 #endif 19.6 19.7 -extern char hvm_io_bitmap[]; 19.8 +extern unsigned long hvm_io_bitmap[]; 19.9 19.10 void hvm_enable(struct hvm_function_table *); 19.11
20.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h Mon Mar 10 22:51:57 2008 +0000 20.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h Sun Mar 16 14:11:34 2008 +0000 20.3 @@ -448,7 +448,7 @@ struct arch_svm_struct { 20.4 struct vmcb_struct *vmcb; 20.5 u64 vmcb_pa; 20.6 u64 asid_generation; /* ASID tracking, moved here for cache locality. */ 20.7 - char *msrpm; 20.8 + unsigned long *msrpm; 20.9 int launch_core; 20.10 bool_t vmcb_in_sync; /* VMCB sync'ed with VMSAVE? */ 20.11 };
21.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h Mon Mar 10 22:51:57 2008 +0000 21.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h Sun Mar 16 14:11:34 2008 +0000 21.3 @@ -81,7 +81,7 @@ struct arch_vmx_struct { 21.4 unsigned long cstar; 21.5 #endif 21.6 21.7 - char *msr_bitmap; 21.8 + unsigned long *msr_bitmap; 21.9 unsigned int msr_count; 21.10 struct vmx_msr_entry *msr_area; 21.11 unsigned int host_msr_count;
22.1 --- a/xen/include/asm-x86/shared.h Mon Mar 10 22:51:57 2008 +0000 22.2 +++ b/xen/include/asm-x86/shared.h Sun Mar 16 14:11:34 2008 +0000 22.3 @@ -3,9 +3,9 @@ 22.4 22.5 #ifdef CONFIG_COMPAT 22.6 22.7 -#define nmi_reason(d) (!has_32bit_shinfo(d) ? \ 22.8 - (void *)&(d)->shared_info->native.arch.nmi_reason : \ 22.9 - (void *)&(d)->shared_info->compat.arch.nmi_reason) 22.10 +#define nmi_reason(d) (!has_32bit_shinfo(d) ? \ 22.11 + (u32 *)&(d)->shared_info->native.arch.nmi_reason : \ 22.12 + (u32 *)&(d)->shared_info->compat.arch.nmi_reason) 22.13 22.14 #define GET_SET_SHARED(type, field) \ 22.15 static inline type arch_get_##field(const struct domain *d) \ 22.16 @@ -41,7 +41,7 @@ static inline void arch_set_##field(stru 22.17 22.18 #else 22.19 22.20 -#define nmi_reason(d) ((void *)&(d)->shared_info->arch.nmi_reason) 22.21 +#define nmi_reason(d) ((u32 *)&(d)->shared_info->arch.nmi_reason) 22.22 22.23 #define GET_SET_SHARED(type, field) \ 22.24 static inline type arch_get_##field(const struct domain *d) \
23.1 --- a/xen/include/xen/shared.h Mon Mar 10 22:51:57 2008 +0000 23.2 +++ b/xen/include/xen/shared.h Sun Mar 16 14:11:34 2008 +0000 23.3 @@ -12,44 +12,36 @@ typedef union { 23.4 struct compat_shared_info compat; 23.5 } shared_info_t; 23.6 23.7 -#define __shared_info(d, s, field) (*(!has_32bit_shinfo(d) ? \ 23.8 - &(s)->native.field : \ 23.9 - &(s)->compat.field)) 23.10 -#define __shared_info_addr(d, s, field) (!has_32bit_shinfo(d) ? \ 23.11 - (void *)&(s)->native.field : \ 23.12 - (void *)&(s)->compat.field) 23.13 - 23.14 +/* 23.15 + * Compat field is never larger than native field, so cast to that as it 23.16 + * is the largest memory range it is safe for the caller to modify without 23.17 + * further discrimination between compat and native cases. 23.18 + */ 23.19 +#define __shared_info(d, s, field) \ 23.20 + (*(!has_32bit_shinfo(d) ? \ 23.21 + (typeof(&(s)->compat.field))&(s)->native.field : \ 23.22 + (typeof(&(s)->compat.field))&(s)->compat.field)) 23.23 #define shared_info(d, field) \ 23.24 __shared_info(d, (d)->shared_info, field) 23.25 -#define shared_info_addr(d, field) \ 23.26 - __shared_info_addr(d, (d)->shared_info, field) 23.27 23.28 typedef union { 23.29 struct vcpu_info native; 23.30 struct compat_vcpu_info compat; 23.31 } vcpu_info_t; 23.32 23.33 -#define vcpu_info(v, field) (*(!has_32bit_shinfo((v)->domain) ? \ 23.34 - &(v)->vcpu_info->native.field : \ 23.35 - &(v)->vcpu_info->compat.field)) 23.36 -#define vcpu_info_addr(v, field) (!has_32bit_shinfo((v)->domain) ? \ 23.37 - (void *)&(v)->vcpu_info->native.field : \ 23.38 - (void *)&(v)->vcpu_info->compat.field) 23.39 +/* As above, cast to compat field type. */ 23.40 +#define vcpu_info(v, field) \ 23.41 + (*(!has_32bit_shinfo((v)->domain) ? \ 23.42 + (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->native.field : \ 23.43 + (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->compat.field)) 23.44 23.45 #else 23.46 23.47 typedef struct shared_info shared_info_t; 23.48 - 23.49 -#define __shared_info(d, s, field) ((s)->field) 23.50 -#define __shared_info_addr(d, s, field) ((void *)&(s)->field) 23.51 - 23.52 #define shared_info(d, field) ((d)->shared_info->field) 23.53 -#define shared_info_addr(d, field) ((void *)&(d)->shared_info->field) 23.54 23.55 typedef struct vcpu_info vcpu_info_t; 23.56 - 23.57 #define vcpu_info(v, field) ((v)->vcpu_info->field) 23.58 -#define vcpu_info_addr(v, field) ((void *)&(v)->vcpu_info->field) 23.59 23.60 #endif 23.61