ia64/xen-unstable

changeset 12012:8ee7bb2af497

[IA64] fix coding style in vmx_init.c

Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Tue Oct 31 22:25:31 2006 -0700 (2006-10-31)
parents 1eb880e9ff94
children 64ede00a004d
files xen/arch/ia64/vmx/vmx_init.c
     1.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Tue Oct 31 22:25:25 2006 -0700
     1.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Tue Oct 31 22:25:31 2006 -0700
     1.3 @@ -183,7 +183,6 @@ static vpd_t *alloc_vpd(void)
     1.4  	mregs->vac.a_cover = 1;
     1.5  	mregs->vac.a_bsw = 1;
     1.6  	mregs->vac.a_int = 1;
     1.7 -       
     1.8  	mregs->vdc.d_vmsw = 1;
     1.9  
    1.10  	return vpd;
    1.11 @@ -276,7 +275,7 @@ static void vmx_create_event_channels(st
    1.12  		for_each_vcpu(v->domain, o) {
    1.13  			p = get_vio(v->domain, o->vcpu_id);
    1.14  			o->arch.arch_vmx.xen_port = p->vp_eport =
    1.15 -			                alloc_unbound_xen_event_channel(o, 0);
    1.16 +					alloc_unbound_xen_event_channel(o, 0);
    1.17  			DPRINTK("Allocated port %d for hvm.\n",
    1.18  			        o->arch.arch_vmx.xen_port);
    1.19  		}
    1.20 @@ -306,8 +305,8 @@ vmx_final_setup_guest(struct vcpu *v)
    1.21  	/* Per-domain vTLB and vhpt implementation. Now vmx domain will stick
    1.22  	 * to this solution. Maybe it can be deferred until we know created
    1.23  	 * one as vmx domain */
    1.24 -#ifndef HASH_VHPT     
    1.25 -        init_domain_tlb(v);
    1.26 +#ifndef HASH_VHPT
    1.27 +	init_domain_tlb(v);
    1.28  #endif
    1.29  	vmx_create_event_channels(v);
    1.30  
    1.31 @@ -378,44 +377,44 @@ static void vmx_build_physmap_table(stru
    1.32  
    1.33  	/* Mark I/O ranges */
    1.34  	for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
    1.35 -	    for (j = io_ranges[i].start;
    1.36 -		j < io_ranges[i].start + io_ranges[i].size;
    1.37 -		j += PAGE_SIZE)
    1.38 -		(void)__assign_domain_page(d, j, io_ranges[i].type,
    1.39 -		                           ASSIGN_writable);
    1.40 +		for (j = io_ranges[i].start;
    1.41 +		     j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE)
    1.42 +			(void)__assign_domain_page(d, j, io_ranges[i].type,
    1.43 +			                           ASSIGN_writable);
    1.44  	}
    1.45  
    1.46  	/* Map normal memory below 3G */
    1.47  	end = VMX_CONFIG_PAGES(d) << PAGE_SHIFT;
    1.48  	tmp = end < MMIO_START ? end : MMIO_START;
    1.49  	for (i = 0; (i < tmp) && (list_ent != &d->page_list); i += PAGE_SIZE) {
    1.50 -	    mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    1.51 -	    list_ent = mfn_to_page(mfn)->list.next;
    1.52 -	    if (VGA_IO_START <= i && i < VGA_IO_START + VGA_IO_SIZE)
    1.53 -		continue;
    1.54 -	    assign_domain_page(d, i, mfn << PAGE_SHIFT);
    1.55 +		mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    1.56 +		list_ent = mfn_to_page(mfn)->list.next;
    1.57 +		if (VGA_IO_START <= i && i < VGA_IO_START + VGA_IO_SIZE)
    1.58 +			continue;
    1.59 +		assign_domain_page(d, i, mfn << PAGE_SHIFT);
    1.60  	}
    1.61  	ASSERT(list_ent != &d->page_list);
    1.62  
    1.63  	/* Map normal memory beyond 4G */
    1.64  	if (unlikely(end > MMIO_START)) {
    1.65 -	    start = 4 * MEM_G;
    1.66 -	    end = start + (end - 3 * MEM_G);
    1.67 -	    for (i = start;
    1.68 -	         (i < end) && (list_ent != &d->page_list); i += PAGE_SIZE) {
    1.69 -		mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    1.70 -		assign_domain_page(d, i, mfn << PAGE_SHIFT);
    1.71 -		list_ent = mfn_to_page(mfn)->list.next;
    1.72 -	    }
    1.73 -	    ASSERT(list_ent != &d->page_list);
    1.74 +		start = 4 * MEM_G;
    1.75 +		end = start + (end - 3 * MEM_G);
    1.76 +		for (i = start;
    1.77 +		     (i < end) && (list_ent != &d->page_list); i += PAGE_SIZE) {
    1.78 +			mfn = page_to_mfn(list_entry(list_ent,
    1.79 +			                             struct page_info, list));
    1.80 +			assign_domain_page(d, i, mfn << PAGE_SHIFT);
    1.81 +			list_ent = mfn_to_page(mfn)->list.next;
    1.82 +		}
    1.83 +		ASSERT(list_ent != &d->page_list);
    1.84  	}
    1.85  	 
    1.86  	/* Map guest firmware */
    1.87  	for (i = GFW_START; (i < GFW_START + GFW_SIZE) &&
    1.88 -		(list_ent != &d->page_list); i += PAGE_SIZE) {
    1.89 -	    mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    1.90 -	    assign_domain_page(d, i, mfn << PAGE_SHIFT);
    1.91 -	    list_ent = mfn_to_page(mfn)->list.next;
    1.92 +	     (list_ent != &d->page_list); i += PAGE_SIZE) {
    1.93 +		mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    1.94 +		assign_domain_page(d, i, mfn << PAGE_SHIFT);
    1.95 +		list_ent = mfn_to_page(mfn)->list.next;
    1.96  	}
    1.97  	ASSERT(list_ent != &d->page_list);
    1.98  
    1.99 @@ -429,10 +428,10 @@ static void vmx_build_physmap_table(stru
   1.100  	assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
   1.101  	list_ent = mfn_to_page(mfn)->list.next;
   1.102  	ASSERT(list_ent != &d->page_list);
   1.103 -    
   1.104 -    mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
   1.105 -    assign_domain_page(d, BUFFER_IO_PAGE_START, mfn << PAGE_SHIFT);
   1.106 -    list_ent = mfn_to_page(mfn)->list.next;
   1.107 +
   1.108 +	mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
   1.109 +	assign_domain_page(d, BUFFER_IO_PAGE_START, mfn << PAGE_SHIFT);
   1.110 +	list_ent = mfn_to_page(mfn)->list.next;
   1.111  	ASSERT(list_ent == &d->page_list);
   1.112  }
   1.113  
   1.114 @@ -444,10 +443,10 @@ void vmx_setup_platform(struct domain *d
   1.115  
   1.116  	d->arch.vmx_platform.shared_page_va =
   1.117  		(unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
   1.118 -    //For buffered IO requests.
   1.119 -    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
   1.120 -    d->arch.hvm_domain.buffered_io_va =
   1.121 -        (unsigned long)__va(__gpa_to_mpa(d, BUFFER_IO_PAGE_START));
   1.122 +	/* For buffered IO requests. */
   1.123 +	spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
   1.124 +	d->arch.hvm_domain.buffered_io_va =
   1.125 +		(unsigned long)__va(__gpa_to_mpa(d, BUFFER_IO_PAGE_START));
   1.126  	/* TEMP */
   1.127  	d->arch.vmx_platform.pib_base = 0xfee00000UL;
   1.128  
   1.129 @@ -455,7 +454,7 @@ void vmx_setup_platform(struct domain *d
   1.130  
   1.131  	/* Only open one port for I/O and interrupt emulation */
   1.132  	memset(&d->shared_info->evtchn_mask[0], 0xff,
   1.133 -	    sizeof(d->shared_info->evtchn_mask));
   1.134 +	       sizeof(d->shared_info->evtchn_mask));
   1.135  
   1.136  	/* initiate spinlock for pass virq */
   1.137  	spin_lock_init(&d->arch.arch_vmx.virq_assist_lock);