ia64/xen-unstable

changeset 9982:6e979aa0e6d2

[IA64] panic -> panic domain

This patch uses panic_domain instead of panic when the panic
is related only to the current domain, not the whole system.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
author awilliam@xenbuild.aw
date Tue May 09 15:23:33 2006 -0600 (2006-05-09)
parents 874661fc2d42
children 11b7dc3529b9
files xen/arch/ia64/vmx/pal_emul.c xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_phy_mode.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vmx_support.c xen/arch/ia64/vmx/vmx_vcpu.c xen/arch/ia64/vmx/vmx_virt.c xen/include/asm-ia64/vmx_vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/pal_emul.c	Tue May 09 12:42:44 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/pal_emul.c	Tue May 09 15:23:33 2006 -0600
     1.3 @@ -62,8 +62,8 @@ pal_cache_flush (VCPU *vcpu) {
     1.4  //		ia64_pal_call_static(gr28 ,gr29, gr30, 
     1.5  //				result.v1,1LL);
     1.6  //	}
     1.7 -	while (result.status != 0) {
     1.8 -        panic("PAL_CACHE_FLUSH ERROR, status %ld", result.status);
     1.9 +	if(result.status != 0) {
    1.10 +        	panic_domain(vcpu_regs(vcpu),"PAL_CACHE_FLUSH ERROR, status %ld", result.status);
    1.11  	}
    1.12  
    1.13  	return result;
    1.14 @@ -445,7 +445,7 @@ pal_emul( VCPU *vcpu) {
    1.15  			break;
    1.16  
    1.17  		default:
    1.18 -			panic("pal_emul(): guest call unsupported pal" );
    1.19 +			panic_domain(vcpu_regs(vcpu),"pal_emul(): guest call unsupported pal" );
    1.20    }
    1.21  		set_pal_result (vcpu, result);
    1.22  }
     2.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Tue May 09 12:42:44 2006 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Tue May 09 15:23:33 2006 -0600
     2.3 @@ -568,7 +568,7 @@ int vmx_check_pending_irq(VCPU *vcpu)
     2.4      if (  vpsr.i && IRQ_NO_MASKED == mask ) {
     2.5          isr = vpsr.val & IA64_PSR_RI;
     2.6          if ( !vpsr.ic )
     2.7 -            panic("Interrupt when IC=0\n");
     2.8 +            panic_domain(regs,"Interrupt when IC=0\n");
     2.9          vmx_reflect_interruption(0,isr,0, 12, regs ); // EXT IRQ
    2.10          injected = 1;
    2.11      }
    2.12 @@ -595,7 +595,8 @@ void guest_write_eoi(VCPU *vcpu)
    2.13      uint64_t  spsr;
    2.14  
    2.15      vec = highest_inservice_irq(vcpu);
    2.16 -    if ( vec == NULL_VECTOR ) panic("Wrong vector to EOI\n");
    2.17 +    if ( vec == NULL_VECTOR ) 
    2.18 +	panic_domain(vcpu_regs(vcpu),"Wrong vector to EOI\n");
    2.19      local_irq_save(spsr);
    2.20      VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
    2.21      local_irq_restore(spsr);
    2.22 @@ -634,7 +635,7 @@ static void generate_exirq(VCPU *vcpu)
    2.23      update_vhpi(vcpu, NULL_VECTOR);
    2.24      isr = vpsr.val & IA64_PSR_RI;
    2.25      if ( !vpsr.ic )
    2.26 -        panic("Interrupt when IC=0\n");
    2.27 +        panic_domain(regs,"Interrupt when IC=0\n");
    2.28      vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ
    2.29  }
    2.30  
     3.1 --- a/xen/arch/ia64/vmx/vmmu.c	Tue May 09 12:42:44 2006 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Tue May 09 15:23:33 2006 -0600
     3.3 @@ -134,7 +134,7 @@ static void init_domain_vhpt(struct vcpu
     3.4      void * vbase;
     3.5      page = alloc_domheap_pages (NULL, VCPU_VHPT_ORDER, 0);
     3.6      if ( page == NULL ) {
     3.7 -        panic("No enough contiguous memory for init_domain_vhpt\n");
     3.8 +        panic_domain(vcpu_regs(v),"No enough contiguous memory for init_domain_vhpt\n");
     3.9      }
    3.10      vbase = page_to_virt(page);
    3.11      memset(vbase, 0, VCPU_VHPT_SIZE);
    3.12 @@ -157,7 +157,7 @@ void init_domain_tlb(struct vcpu *v)
    3.13      init_domain_vhpt(v);
    3.14      page = alloc_domheap_pages (NULL, VCPU_VTLB_ORDER, 0);
    3.15      if ( page == NULL ) {
    3.16 -        panic("No enough contiguous memory for init_domain_tlb\n");
    3.17 +        panic_domain(vcpu_regs(v),"No enough contiguous memory for init_domain_tlb\n");
    3.18      }
    3.19      vbase = page_to_virt(page);
    3.20      memset(vbase, 0, VCPU_VTLB_SIZE);
    3.21 @@ -200,7 +200,7 @@ void machine_tlb_insert(struct vcpu *d, 
    3.22      mtlb.ppn = get_mfn(d->domain,tlb->ppn);
    3.23      mtlb_ppn=mtlb.ppn;
    3.24      if (mtlb_ppn == INVALID_MFN)
    3.25 -    panic("Machine tlb insert with invalid mfn number.\n");
    3.26 +        panic_domain(vcpu_regs(d),"Machine tlb insert with invalid mfn number.\n");
    3.27  
    3.28      psr = ia64_clear_ic();
    3.29      if ( cl == ISIDE_TLB ) {
    3.30 @@ -323,12 +323,12 @@ fetch_code(VCPU *vcpu, u64 gip, u64 *cod
    3.31      }
    3.32      if( gpip){
    3.33  	 mfn = gmfn_to_mfn(vcpu->domain, gpip >>PAGE_SHIFT);
    3.34 -    	if( mfn == INVALID_MFN )  panic("fetch_code: invalid memory\n");
    3.35 +    	if( mfn == INVALID_MFN )  panic_domain(vcpu_regs(vcpu),"fetch_code: invalid memory\n");
    3.36      	vpa =(u64 *)__va( (gip & (PAGE_SIZE-1)) | (mfn<<PAGE_SHIFT));
    3.37      }else{
    3.38  	tlb = vhpt_lookup(gip);
    3.39  	if( tlb == NULL)
    3.40 -	    panic("No entry found in ITLB and DTLB\n");
    3.41 +	    panic_domain(vcpu_regs(vcpu),"No entry found in ITLB and DTLB\n");
    3.42  	vpa =(u64 *)__va((tlb->ppn>>(PAGE_SHIFT-ARCH_PAGE_SHIFT)<<PAGE_SHIFT)|(gip&(PAGE_SIZE-1)));
    3.43      }
    3.44      *code1 = *vpa++;
    3.45 @@ -345,7 +345,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
    3.46      slot = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    3.47      if (slot >=0) {
    3.48          // generate MCA.
    3.49 -        panic("Tlb conflict!!");
    3.50 +        panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
    3.51          return IA64_FAULT;
    3.52      }
    3.53      thash_purge_and_insert(vcpu, pte, itir, ifa);
    3.54 @@ -361,7 +361,7 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
    3.55      slot = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    3.56      if (slot >=0) {
    3.57          // generate MCA.
    3.58 -        panic("Tlb conflict!!");
    3.59 +        panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
    3.60          return IA64_FAULT;
    3.61      }
    3.62      gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
    3.63 @@ -385,7 +385,7 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, u64
    3.64      index = vtr_find_overlap(vcpu, va, ps, ISIDE_TLB);
    3.65      if (index >=0) {
    3.66          // generate MCA.
    3.67 -        panic("Tlb conflict!!");
    3.68 +        panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
    3.69          return IA64_FAULT;
    3.70      }
    3.71      thash_purge_entries(vcpu, va, ps);
    3.72 @@ -407,7 +407,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
    3.73      index = vtr_find_overlap(vcpu, va, ps, DSIDE_TLB);
    3.74      if (index>=0) {
    3.75          // generate MCA.
    3.76 -        panic("Tlb conflict!!");
    3.77 +        panic_domain(vcpu_regs(vcpu),"Tlb conflict!!");
    3.78          return IA64_FAULT;
    3.79      }
    3.80      thash_purge_entries(vcpu, va, ps);
     4.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Tue May 09 12:42:44 2006 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Tue May 09 15:23:33 2006 -0600
     4.3 @@ -208,8 +208,9 @@ vmx_create_vp(struct vcpu *v)
     4.4  	ivt_base = (u64) &vmx_ia64_ivt;
     4.5  	printk("ivt_base: 0x%lx\n", ivt_base);
     4.6  	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
     4.7 -	if (ret != PAL_STATUS_SUCCESS)
     4.8 -		panic("ia64_pal_vp_create failed. \n");
     4.9 +	if (ret != PAL_STATUS_SUCCESS){
    4.10 +		panic_domain(vcpu_regs(v),"ia64_pal_vp_create failed. \n");
    4.11 +	}
    4.12  }
    4.13  
    4.14  /* Other non-context related tasks can be done in context switch */
    4.15 @@ -220,8 +221,9 @@ vmx_save_state(struct vcpu *v)
    4.16  
    4.17  	/* FIXME: about setting of pal_proc_vector... time consuming */
    4.18  	status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
    4.19 -	if (status != PAL_STATUS_SUCCESS)
    4.20 -		panic("Save vp status failed\n");
    4.21 +	if (status != PAL_STATUS_SUCCESS){
    4.22 +		panic_domain(vcpu_regs(v),"Save vp status failed\n");
    4.23 +	}
    4.24  
    4.25  
    4.26  	/* Need to save KR when domain switch, though HV itself doesn;t
    4.27 @@ -244,8 +246,9 @@ vmx_load_state(struct vcpu *v)
    4.28  	u64 status;
    4.29  
    4.30  	status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
    4.31 -	if (status != PAL_STATUS_SUCCESS)
    4.32 -		panic("Restore vp status failed\n");
    4.33 +	if (status != PAL_STATUS_SUCCESS){
    4.34 +		panic_domain(vcpu_regs(v),"Restore vp status failed\n");
    4.35 +	}
    4.36  
    4.37  	ia64_set_kr(0, v->arch.arch_vmx.vkr[0]);
    4.38  	ia64_set_kr(1, v->arch.arch_vmx.vkr[1]);
     5.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue May 09 12:42:44 2006 -0600
     5.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue May 09 15:23:33 2006 -0600
     5.3 @@ -186,8 +186,10 @@ vmx_load_all_rr(VCPU *vcpu)
     5.4  	 * mode in same region
     5.5  	 */
     5.6  	if (is_physical_mode(vcpu)) {
     5.7 -		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
     5.8 -			panic("Unexpected domain switch in phy emul\n");
     5.9 +		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL){
    5.10 +			panic_domain(vcpu_regs(vcpu),
    5.11 +			             "Unexpected domain switch in phy emul\n");
    5.12 +		}
    5.13  		phy_rr.rrval = vcpu->arch.metaphysical_rr0;
    5.14  		//phy_rr.ps = PAGE_SHIFT;
    5.15  		phy_rr.ve = 1;
    5.16 @@ -322,8 +324,7 @@ switch_mm_mode(VCPU *vcpu, IA64_PSR old_
    5.17          break;
    5.18      default:
    5.19          /* Sanity check */
    5.20 -    printf("old: %lx, new: %lx\n", old_psr.val, new_psr.val);
    5.21 -        panic("Unexpected virtual <--> physical mode transition");
    5.22 +        panic_domain(vcpu_regs(vcpu),"Unexpected virtual <--> physical mode transition,old:%lx,new:%lx\n",old_psr.val,new_psr.val);
    5.23          break;
    5.24      }
    5.25      return;
     6.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Tue May 09 12:42:44 2006 -0600
     6.2 +++ b/xen/arch/ia64/vmx/vmx_process.c	Tue May 09 15:23:33 2006 -0600
     6.3 @@ -338,7 +338,7 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
     6.4      }
     6.5      if(vec == 1) type = ISIDE_TLB;
     6.6      else if(vec == 2) type = DSIDE_TLB;
     6.7 -    else panic("wrong vec\n");
     6.8 +    else panic_domain(regs,"wrong vec:%0xlx\n",vec);
     6.9  
    6.10  //    prepare_if_physical_mode(v);
    6.11  
     7.1 --- a/xen/arch/ia64/vmx/vmx_support.c	Tue May 09 12:42:44 2006 -0600
     7.2 +++ b/xen/arch/ia64/vmx/vmx_support.c	Tue May 09 15:23:33 2006 -0600
     7.3 @@ -92,12 +92,12 @@ void vmx_io_assist(struct vcpu *v)
     7.4       */
     7.5      vio = get_vio(v->domain, v->vcpu_id);
     7.6      if (!vio)
     7.7 -	panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
     7.8 +	panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n", (unsigned long)vio);
     7.9  
    7.10      p = &vio->vp_ioreq;
    7.11  
    7.12      if (p->state == STATE_IORESP_HOOK)
    7.13 -	panic("Not supported: No hook available for DM request\n");
    7.14 +	panic_domain(vcpu_regs(v),"Not supported: No hook available for DM request\n");
    7.15  
    7.16      if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
    7.17  	if (p->state != STATE_IORESP_READY) {
    7.18 @@ -135,7 +135,7 @@ void vmx_intr_assist(struct vcpu *v)
    7.19       * out of vmx_wait_io, when guest is still waiting for response.
    7.20       */
    7.21      if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
    7.22 -	panic("!!!Bad resume to guest before I/O emulation is done.\n");
    7.23 +	panic_domain(vcpu_regs(v),"!!!Bad resume to guest before I/O emulation is done.\n");
    7.24  
    7.25      /* Clear indicator specific to interrupt delivered from DM */
    7.26      if (test_and_clear_bit(port,
    7.27 @@ -154,7 +154,7 @@ void vmx_intr_assist(struct vcpu *v)
    7.28       */
    7.29      vio = get_vio(v->domain, v->vcpu_id);
    7.30      if (!vio)
    7.31 -	panic("Corruption: bad shared page: %lx\n", (unsigned long)vio);
    7.32 +	panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n", (unsigned long)vio);
    7.33  
    7.34  #ifdef V_IOSAPIC_READY
    7.35      /* Confirm virtual interrupt line signals, and set pending bits in vpd */
     8.1 --- a/xen/arch/ia64/vmx/vmx_vcpu.c	Tue May 09 12:42:44 2006 -0600
     8.2 +++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Tue May 09 15:23:33 2006 -0600
     8.3 @@ -91,7 +91,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
     8.4       * Otherwise panic
     8.5       */
     8.6      if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
     8.7 -        panic ("Setting unsupport guest psr!");
     8.8 +        panic_domain (regs,"Setting unsupport guest psr!");
     8.9      }
    8.10  
    8.11      /*
     9.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Tue May 09 12:42:44 2006 -0600
     9.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Tue May 09 15:23:33 2006 -0600
     9.3 @@ -182,8 +182,9 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
     9.4  IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
     9.5  {
     9.6      UINT64 val;
     9.7 +
     9.8      if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
     9.9 -	panic(" get_psr nat bit fault\n");
    9.10 +	panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
    9.11  
    9.12  	val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
    9.13  #if 0
    9.14 @@ -216,7 +217,7 @@ IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST6
    9.15      regs=vcpu_regs(vcpu);
    9.16      vpsr.val=regs->cr_ipsr;
    9.17      if ( vpsr.is == 1 ) {
    9.18 -        panic ("We do not support IA32 instruction yet");
    9.19 +        panic_domain(regs,"We do not support IA32 instruction yet");
    9.20      }
    9.21  
    9.22      return vmx_vcpu_rfi(vcpu);
    9.23 @@ -715,8 +716,9 @@ IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *v
    9.24  {
    9.25      // I27 and M30 are identical for these fields
    9.26      UINT64  imm;
    9.27 +
    9.28      if(inst.M30.ar3!=44){
    9.29 -        panic("Can't support ar register other than itc");
    9.30 +        panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
    9.31      }
    9.32  #ifdef  CHECK_FAULT
    9.33      IA64_PSR vpsr;
    9.34 @@ -741,7 +743,7 @@ IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *v
    9.35      // I26 and M29 are identical for these fields
    9.36      u64 r2;
    9.37      if(inst.M29.ar3!=44){
    9.38 -        panic("Can't support ar register other than itc");
    9.39 +        panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
    9.40      }
    9.41      if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
    9.42  #ifdef  CHECK_FAULT
    9.43 @@ -769,7 +771,7 @@ IA64FAULT vmx_emul_mov_from_ar_reg(VCPU 
    9.44      // I27 and M30 are identical for these fields
    9.45      u64 r1;
    9.46      if(inst.M31.ar3!=44){
    9.47 -        panic("Can't support ar register other than itc");
    9.48 +        panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
    9.49      }
    9.50  #ifdef  CHECK_FAULT
    9.51      if(check_target_register(vcpu,inst.M31.r1)){
    9.52 @@ -1359,8 +1361,7 @@ if ( (cause == 0xff && opcode == 0x1e000
    9.53      slot_type = slot_types[bundle.template][slot];
    9.54      ia64_priv_decoder(slot_type, inst, &cause);
    9.55      if(cause==0){
    9.56 -        printf("This instruction at 0x%lx slot %d can't be  virtualized", iip, slot);
    9.57 -        panic("123456\n");
    9.58 +        panic_domain(regs,"This instruction at 0x%lx slot %d can't be  virtualized", iip, slot);
    9.59      }
    9.60  #else
    9.61      inst.inst=opcode;
    9.62 @@ -1494,12 +1495,8 @@ if ( (cause == 0xff && opcode == 0x1e000
    9.63  	status=IA64_FAULT;
    9.64          break;
    9.65      default:
    9.66 -        printf("unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
    9.67 -        while(1);
    9.68 -	/* For unknown cause, let hardware to re-execute */
    9.69 -	status=IA64_RETRY;
    9.70 +        panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
    9.71          break;
    9.72 -//        panic("unknown cause in virtualization intercept");
    9.73      };
    9.74  
    9.75  #if 0
    10.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Tue May 09 12:42:44 2006 -0600
    10.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Tue May 09 15:23:33 2006 -0600
    10.3 @@ -359,7 +359,7 @@ IA64FAULT vmx_vcpu_get_cpuid(VCPU *vcpu,
    10.4      // TODO: unimplemented DBRs return a reserved register fault
    10.5      // TODO: Should set Logical CPU state, not just physical
    10.6      if(reg > 4){
    10.7 -        panic("there are only five cpuid registers");
    10.8 +        panic_domain(vcpu_regs(vcpu),"there are only five cpuid registers");
    10.9      }
   10.10      *pval=VCPU(vcpu,vcpuid[reg]);
   10.11      return (IA64_NO_FAULT);