direct-io.hg

changeset 10386:b20733e82ab6

[IA64] Enable SMP on VTI-Domain

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author awilliam@xenbuild.aw
date Thu Jun 08 11:00:09 2006 -0600 (2006-06-08)
parents d8d2b5c08245
children b87ff075dab9
files xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_support.c xen/arch/ia64/vmx/vmx_virt.c xen/arch/ia64/vmx/vtlb.c xen/arch/ia64/xen/xentime.c xen/include/asm-ia64/vmx.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/mmio.c	Thu Jun 08 10:17:22 2006 -0600
     1.2 +++ b/xen/arch/ia64/vmx/mmio.c	Thu Jun 08 11:00:09 2006 -0600
     1.3 @@ -33,8 +33,9 @@
     1.4  #include <asm/mm.h>
     1.5  #include <asm/vmx.h>
     1.6  #include <public/event_channel.h>
     1.7 +#include <public/arch-ia64.h>
     1.8  #include <linux/event.h>
     1.9 -
    1.10 +#include <xen/domain.h>
    1.11  /*
    1.12  struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
    1.13  {
    1.14 @@ -51,7 +52,7 @@ struct mmio_list *lookup_mmio(u64 gpa, s
    1.15  #define PIB_OFST_INTA           0x1E0000
    1.16  #define PIB_OFST_XTP            0x1E0008
    1.17  
    1.18 -static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
    1.19 +static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
    1.20  
    1.21  static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
    1.22  {
    1.23 @@ -356,42 +357,67 @@ static void deliver_ipi (VCPU *vcpu, uin
    1.24   */
    1.25  static inline VCPU *lid_2_vcpu (struct domain *d, u64 id, u64 eid)
    1.26  {
    1.27 -	int   i;
    1.28 -	VCPU  *vcpu;
    1.29 -	LID	  lid;
    1.30 -	for (i=0; i<MAX_VIRT_CPUS; i++) {
    1.31 -		vcpu = d->vcpu[i];
    1.32 - 		if (!vcpu)
    1.33 - 			continue;
    1.34 -		lid.val = VCPU(vcpu, lid);
    1.35 -		if ( lid.id == id && lid.eid == eid ) {
    1.36 -		    return vcpu;
    1.37 -		}
    1.38 -	}
    1.39 -	return NULL;
    1.40 +    int   i;
    1.41 +    VCPU  *vcpu;
    1.42 +    LID   lid;
    1.43 +    for (i=0; i<MAX_VIRT_CPUS; i++) {
    1.44 +        vcpu = d->vcpu[i];
    1.45 +        if (!vcpu)
    1.46 +            continue;
    1.47 +        lid.val = VCPU_LID(vcpu);
    1.48 +        if ( lid.id == id && lid.eid == eid )
    1.49 +            return vcpu;
    1.50 +    }
    1.51 +    return NULL;
    1.52  }
    1.53  
    1.54  /*
    1.55   * execute write IPI op.
    1.56   */
    1.57 -static int write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
    1.58 +static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value)
    1.59  {
    1.60 -    VCPU   *target_cpu;
    1.61 - 
    1.62 -    target_cpu = lid_2_vcpu(vcpu->domain, 
    1.63 -    				((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
    1.64 -    if ( target_cpu == NULL ) panic_domain (NULL,"Unknown IPI cpu\n");
    1.65 -    if ( target_cpu == vcpu ) {
    1.66 -    	// IPI to self
    1.67 -        deliver_ipi (vcpu, ((ipi_d_t)value).dm, 
    1.68 -                ((ipi_d_t)value).vector);
    1.69 -        return 1;
    1.70 +    VCPU   *targ;
    1.71 +    struct domain *d=vcpu->domain; 
    1.72 +    targ = lid_2_vcpu(vcpu->domain, 
    1.73 +           ((ipi_a_t)addr).id, ((ipi_a_t)addr).eid);
    1.74 +    if ( targ == NULL ) panic_domain (NULL,"Unknown IPI cpu\n");
    1.75 +
    1.76 +    if (!test_bit(_VCPUF_initialised, &targ->vcpu_flags)) {
    1.77 +        struct pt_regs *targ_regs = vcpu_regs (targ);
    1.78 +        struct vcpu_guest_context c;
    1.79 +
    1.80 +        printf ("arch_boot_vcpu: %p %p\n",
    1.81 +                (void *)d->arch.boot_rdv_ip,
    1.82 +                (void *)d->arch.boot_rdv_r1);
    1.83 +        memset (&c, 0, sizeof (c));
    1.84 +
    1.85 +        c.flags = VGCF_VMX_GUEST;
    1.86 +        if (arch_set_info_guest (targ, &c) != 0) {
    1.87 +            printf ("arch_boot_vcpu: failure\n");
    1.88 +            return;
    1.89 +        }
    1.90 +        /* First or next rendez-vous: set registers.  */
    1.91 +        vcpu_init_regs (targ);
    1.92 +        targ_regs->cr_iip = d->arch.boot_rdv_ip;
    1.93 +        targ_regs->r1 = d->arch.boot_rdv_r1;
    1.94 +
    1.95 +        if (test_and_clear_bit(_VCPUF_down,&targ->vcpu_flags)) {
    1.96 +            vcpu_wake(targ);
    1.97 +            printf ("arch_boot_vcpu: vcpu %d awaken %016lx!\n",
    1.98 +                    targ->vcpu_id, targ_regs->cr_iip);
    1.99 +        }
   1.100 +        else
   1.101 +            printf ("arch_boot_vcpu: huu, already awaken!");
   1.102      }
   1.103      else {
   1.104 -    	// TODO: send Host IPI to inject guest SMP IPI interruption
   1.105 -        panic_domain (NULL, "No SM-VP supported!\n");
   1.106 -        return 0;
   1.107 +        int running = test_bit(_VCPUF_running,&targ->vcpu_flags);
   1.108 +        deliver_ipi (targ, ((ipi_d_t)value).dm, 
   1.109 +                    ((ipi_d_t)value).vector);
   1.110 +        vcpu_unblock(targ);
   1.111 +        if (running)
   1.112 +            smp_send_event_check_cpu(targ->processor);
   1.113      }
   1.114 +    return;
   1.115  }
   1.116  
   1.117  
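Note: the new write_ipi() path decodes the IPI target from the store address and the interrupt payload from the store data via the ((ipi_a_t)addr) and ((ipi_d_t)value) casts. The real ipi_a_t/ipi_d_t definitions live in the ia64 headers; the standalone sketch below only illustrates the field layout assumed here (a PIB offset carrying eid/id, a data word carrying vector and delivery mode) and is not the hypervisor's code.

#include <stdint.h>
#include <stdio.h>

typedef union {
    uint64_t val;
    struct {
        uint64_t rv  : 3;    /* reserved */
        uint64_t ir  : 1;    /* redirect hint */
        uint64_t eid : 8;    /* target extended id */
        uint64_t id  : 8;    /* target id */
        uint64_t rv2 : 44;
    } f;
} ipi_addr_t;                /* illustrative stand-in for ipi_a_t */

typedef union {
    uint64_t val;
    struct {
        uint64_t vector : 8; /* interrupt vector */
        uint64_t dm     : 3; /* delivery mode */
        uint64_t rv     : 53;
    } f;
} ipi_data_t;                /* illustrative stand-in for ipi_d_t */

int main(void)
{
    ipi_addr_t a = { .val = (2UL << 8 | 0UL) << 4 }; /* target id=2, eid=0 */
    ipi_data_t d = { .val = 0xf3 };                  /* dm=0 (INT), vector 0xf3 */

    printf("id=%lu eid=%lu dm=%lu vector=0x%lx\n",
           (unsigned long)a.f.id, (unsigned long)a.f.eid,
           (unsigned long)d.f.dm, (unsigned long)d.f.vector);
    return 0;
}

With such a layout, the id/eid pair extracted from the store address is what lid_2_vcpu() matches against each vcpu's LID.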
     2.1 --- a/xen/arch/ia64/vmx/vlsapic.c	Thu Jun 08 10:17:22 2006 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vlsapic.c	Thu Jun 08 11:00:09 2006 -0600
     2.3 @@ -362,7 +362,7 @@ void vlsapic_reset(VCPU *vcpu)
     2.4  {
     2.5      int     i;
     2.6  
     2.7 -    VCPU(vcpu, lid) = ia64_getreg(_IA64_REG_CR_LID);
     2.8 +    VCPU(vcpu, lid) = VCPU_LID(vcpu);
     2.9      VCPU(vcpu, ivr) = 0;
    2.10      VCPU(vcpu,tpr) = 0x10000;
    2.11      VCPU(vcpu, eoi) = 0;
     3.1 --- a/xen/arch/ia64/vmx/vmmu.c	Thu Jun 08 10:17:22 2006 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Thu Jun 08 11:00:09 2006 -0600
     3.3 @@ -492,13 +492,64 @@ IA64FAULT vmx_vcpu_ptc_e(VCPU *vcpu, UIN
     3.4  
     3.5  IA64FAULT vmx_vcpu_ptc_g(VCPU *vcpu, UINT64 va, UINT64 ps)
     3.6  {
     3.7 -    vmx_vcpu_ptc_l(vcpu, va, ps);
     3.8 +    vmx_vcpu_ptc_ga(vcpu, va, ps);
     3.9      return IA64_ILLOP_FAULT;
    3.10  }
    3.11 +/*
    3.12 +IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
    3.13 +{
    3.14 +    vmx_vcpu_ptc_l(vcpu, va, ps);
    3.15 +    return IA64_NO_FAULT;
    3.16 +}
    3.17 + */
    3.18 +struct ptc_ga_args {
    3.19 +    unsigned long vadr;
    3.20 +    unsigned long rid;
    3.21 +    unsigned long ps;
    3.22 +    struct vcpu *vcpu;
    3.23 +};
    3.24 +
    3.25 +static void ptc_ga_remote_func (void *varg)
    3.26 +{
    3.27 +    u64 oldrid, moldrid;
    3.28 +    VCPU *v;
    3.29 +    struct ptc_ga_args *args = (struct ptc_ga_args *)varg;
    3.30 +    v = args->vcpu;
    3.31 +    oldrid = VMX(v, vrr[0]);
    3.32 +    VMX(v, vrr[0]) = args->rid;
    3.33 +    moldrid = ia64_get_rr(0x0);
    3.34 +    ia64_set_rr(0x0,vrrtomrr(v,args->rid));
    3.35 +    vmx_vcpu_ptc_l(v, args->vadr, args->ps);
    3.36 +    VMX(v, vrr[0]) = oldrid; 
    3.37 +    ia64_set_rr(0x0,moldrid);
    3.38 +}
    3.39 +
    3.40  
    3.41  IA64FAULT vmx_vcpu_ptc_ga(VCPU *vcpu,UINT64 va,UINT64 ps)
    3.42  {
    3.43 -    vmx_vcpu_ptc_l(vcpu, va, ps);
    3.44 +
    3.45 +    struct domain *d = vcpu->domain;
    3.46 +    struct vcpu *v;
    3.47 +    struct ptc_ga_args args;
    3.48 +
    3.49 +    args.vadr = va<<3>>3;
    3.50 +    vcpu_get_rr(vcpu, va, &args.rid);
    3.51 +    args.ps = ps;
    3.52 +    for_each_vcpu (d, v) {
    3.53 +        args.vcpu = v;
    3.54 +        if (v->processor != vcpu->processor) {
    3.55 +            int proc;
    3.56 +            /* Flush VHPT on remote processors.  */
    3.57 +            do {
    3.58 +                proc = v->processor;
    3.59 +                smp_call_function_single(v->processor, 
    3.60 +                    &ptc_ga_remote_func, &args, 0, 1);
    3.61 +                /* Try again if VCPU has migrated.  */
    3.62 +            } while (proc != v->processor);
    3.63 +        }
    3.64 +        else
    3.65 +            ptc_ga_remote_func(&args);
    3.66 +    }
    3.67      return IA64_NO_FAULT;
    3.68  }
    3.69  
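Note: vmx_vcpu_ptc_ga() now broadcasts the purge. It snapshots the guest address and region id, then runs ptc_ga_remote_func() on whichever physical CPU each vcpu currently occupies, retrying if the vcpu migrates mid-call. The args.vadr = va<<3>>3 step strips the virtual region number (bits 63:61) so only the offset within the region is passed along; a minimal standalone illustration of that masking (the address value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t va   = 0xe000000012345000UL;  /* example region-7 address */
    uint64_t vrn  = va >> 61;              /* virtual region number: 7 */
    uint64_t vadr = va << 3 >> 3;          /* region bits 63:61 cleared */

    printf("vrn=%lu  vadr=0x%016lx\n",
           (unsigned long)vrn, (unsigned long)vadr);
    return 0;
}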
     4.1 --- a/xen/arch/ia64/vmx/vmx_support.c	Thu Jun 08 10:17:22 2006 -0600
     4.2 +++ b/xen/arch/ia64/vmx/vmx_support.c	Thu Jun 08 11:00:09 2006 -0600
     4.3 @@ -138,7 +138,8 @@ void vmx_intr_assist(struct vcpu *v)
     4.4  
     4.5  #ifdef V_IOSAPIC_READY
     4.6      /* Confirm virtual interrupt line signals, and set pending bits in vpd */
     4.7 -    vmx_virq_line_assist(v);
     4.8 +    if(v->vcpu_id==0)
     4.9 +        vmx_virq_line_assist(v);
    4.10  #endif
    4.11      return;
    4.12  }
     5.1 --- a/xen/arch/ia64/vmx/vmx_virt.c	Thu Jun 08 10:17:22 2006 -0600
     5.2 +++ b/xen/arch/ia64/vmx/vmx_virt.c	Thu Jun 08 11:00:09 2006 -0600
     5.3 @@ -317,12 +317,68 @@ IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INS
     5.4  
     5.5  IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
     5.6  {
     5.7 -    return vmx_emul_ptc_l(vcpu, inst);
     5.8 +    u64 r2,r3;
     5.9 +#ifdef  VMAL_NO_FAULT_CHECK    
    5.10 +    IA64_PSR  vpsr;
    5.11 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
    5.12 +    if ( vpsr.cpl != 0) {
    5.13 +        /* Inject Privileged Operation fault into guest */
    5.14 +        set_privileged_operation_isr (vcpu, 0);
    5.15 +        privilege_op (vcpu);
    5.16 +        return IA64_FAULT;
    5.17 +    }
    5.18 +#endif // VMAL_NO_FAULT_CHECK    
    5.19 +    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
    5.20 +#ifdef  VMAL_NO_FAULT_CHECK
    5.21 +        ISR isr;
    5.22 +        set_isr_reg_nat_consumption(vcpu,0,0);
    5.23 +        rnat_comsumption(vcpu);
    5.24 +        return IA64_FAULT;
    5.25 +#endif // VMAL_NO_FAULT_CHECK
    5.26 +    }
    5.27 +#ifdef  VMAL_NO_FAULT_CHECK
    5.28 +    if (unimplemented_gva(vcpu,r3) ) {
    5.29 +        isr.val = set_isr_ei_ni(vcpu);
    5.30 +        isr.code = IA64_RESERVED_REG_FAULT;
    5.31 +        vcpu_set_isr(vcpu, isr.val);
    5.32 +        unimpl_daddr(vcpu);
    5.33 +        return IA64_FAULT;
    5.34 +   }
    5.35 +#endif // VMAL_NO_FAULT_CHECK
    5.36 +    return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
    5.37  }
    5.38  
    5.39  IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
    5.40  {
    5.41 -    return vmx_emul_ptc_l(vcpu, inst);
    5.42 +    u64 r2,r3;
    5.43 +#ifdef  VMAL_NO_FAULT_CHECK    
    5.44 +    IA64_PSR  vpsr;
    5.45 +    vpsr.val=vmx_vcpu_get_psr(vcpu);
    5.46 +    if ( vpsr.cpl != 0) {
    5.47 +        /* Inject Privileged Operation fault into guest */
    5.48 +        set_privileged_operation_isr (vcpu, 0);
    5.49 +        privilege_op (vcpu);
    5.50 +        return IA64_FAULT;
    5.51 +    }
    5.52 +#endif // VMAL_NO_FAULT_CHECK    
    5.53 +    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
    5.54 +#ifdef  VMAL_NO_FAULT_CHECK
    5.55 +        ISR isr;
    5.56 +        set_isr_reg_nat_consumption(vcpu,0,0);
    5.57 +        rnat_comsumption(vcpu);
    5.58 +        return IA64_FAULT;
    5.59 +#endif // VMAL_NO_FAULT_CHECK
    5.60 +    }
    5.61 +#ifdef  VMAL_NO_FAULT_CHECK
    5.62 +    if (unimplemented_gva(vcpu,r3) ) {
    5.63 +        isr.val = set_isr_ei_ni(vcpu);
    5.64 +        isr.code = IA64_RESERVED_REG_FAULT;
    5.65 +        vcpu_set_isr(vcpu, isr.val);
    5.66 +        unimpl_daddr(vcpu);
    5.67 +        return IA64_FAULT;
    5.68 +   }
    5.69 +#endif // VMAL_NO_FAULT_CHECK
    5.70 +    return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
    5.71  }
    5.72  
    5.73  IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
    5.74 @@ -1191,7 +1247,6 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
    5.75      }
    5.76  #endif  //CHECK_FAULT
    5.77      r2 = cr_igfld_mask(inst.M32.cr3,r2);
    5.78 -    VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
    5.79      switch (inst.M32.cr3) {
    5.80          case 0: return vmx_vcpu_set_dcr(vcpu,r2);
    5.81          case 1: return vmx_vcpu_set_itm(vcpu,r2);
    5.82 @@ -1207,7 +1262,7 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
    5.83          case 24:return vcpu_set_iim(vcpu,r2);
    5.84          case 25:return vcpu_set_iha(vcpu,r2);
    5.85          case 64:printk("SET LID to 0x%lx\n", r2);
    5.86 -		return vmx_vcpu_set_lid(vcpu,r2);
    5.87 +                return IA64_NO_FAULT;
    5.88          case 65:return IA64_NO_FAULT;
    5.89          case 66:return vmx_vcpu_set_tpr(vcpu,r2);
    5.90          case 67:return vmx_vcpu_set_eoi(vcpu,r2);
    5.91 @@ -1220,7 +1275,8 @@ IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu,
    5.92          case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
    5.93          case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
    5.94          case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
    5.95 -        default: return IA64_NO_FAULT;
    5.96 +        default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
    5.97 +                return IA64_NO_FAULT;
    5.98      }
    5.99  }
   5.100  
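Note: both emulation routines end with bits(r2,2,7), matching the architected ptc.g/ptc.ga operands: the address comes from GR[r3] and the page size from GR[r2]{7:2}. The stand-in below assumes bits(v, lo, hi) returns the inclusive bit range lo..hi, which may differ from the helper actually used in vmx_virt.c:

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics: return bits lo..hi (inclusive) of v. */
static uint64_t bits(uint64_t v, unsigned lo, unsigned hi)
{
    return (v >> lo) & ((1UL << (hi - lo + 1)) - 1);
}

int main(void)
{
    uint64_t r2 = 14UL << 2;             /* GR[r2]{7:2} = 14 -> 16KB pages */
    uint64_t ps = bits(r2, 2, 7);

    printf("ps=%lu -> page size %lu bytes\n",
           (unsigned long)ps, 1UL << ps);
    return 0;
}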
     6.1 --- a/xen/arch/ia64/vmx/vtlb.c	Thu Jun 08 10:17:22 2006 -0600
     6.2 +++ b/xen/arch/ia64/vmx/vtlb.c	Thu Jun 08 11:00:09 2006 -0600
     6.3 @@ -58,12 +58,6 @@ static thash_data_t *cch_alloc(thash_cb_
     6.4      return p;
     6.5  }
     6.6  
     6.7 -static void cch_free(thash_cb_t *hcb, thash_data_t *cch)
     6.8 -{
     6.9 -    cch->next = hcb->cch_freelist;
    6.10 -    hcb->cch_freelist = cch;
    6.11 -}
    6.12 -
    6.13  /*
    6.14   * Check to see if the address rid:va is translated by the TLB
    6.15   */
    6.16 @@ -94,22 +88,6 @@ static int
    6.17  
    6.18  }
    6.19  
    6.20 -/*
    6.21 - * Delete an thash entry leading collision chain.
    6.22 - */
    6.23 -static void __rem_hash_head(thash_cb_t *hcb, thash_data_t *hash)
    6.24 -{
    6.25 -    thash_data_t *next=hash->next;
    6.26 -    if ( next) {
    6.27 -        next->len=hash->len-1;
    6.28 -        *hash = *next;
    6.29 -        cch_free (hcb, next);
    6.30 -    }
    6.31 -    else {
    6.32 -        hash->ti=1;
    6.33 -    }
    6.34 -}
    6.35 -
    6.36  thash_data_t *__vtr_lookup(VCPU *vcpu, u64 va, int is_data)
    6.37  {
    6.38  
    6.39 @@ -142,17 +120,18 @@ thash_data_t *__vtr_lookup(VCPU *vcpu, u
    6.40  
    6.41  static void thash_recycle_cch(thash_cb_t *hcb, thash_data_t *hash)
    6.42  {
    6.43 -    thash_data_t *p;
    6.44 +    thash_data_t *p, *q;
    6.45      int i=0;
    6.46      
    6.47      p=hash;
    6.48      for(i=0; i < MAX_CCN_DEPTH; i++){
    6.49          p=p->next;
    6.50      }
    6.51 -    p->next=hcb->cch_freelist;
    6.52 -    hcb->cch_freelist=hash->next;
    6.53 +    q=hash->next;
    6.54      hash->len=0;
    6.55      hash->next=0;
    6.56 +    p->next=hcb->cch_freelist;
    6.57 +    hcb->cch_freelist=q;
    6.58  }
    6.59  
    6.60  
    6.61 @@ -265,16 +244,14 @@ static void vtlb_purge(thash_cb_t *hcb, 
    6.62          hash_table = vsa_thash(hcb->pta, start, vrr.rrval, &tag);
    6.63          if(!INVALID_TLB(hash_table)){
    6.64              if(hash_table->etag == tag){
    6.65 -                __rem_hash_head(hcb, hash_table);
    6.66 +                 hash_table->etag = 1UL<<63;
    6.67              }
    6.68              else{
    6.69                  prev=hash_table;
    6.70                  next=prev->next;
    6.71                  while(next){
    6.72                      if(next->etag == tag){
    6.73 -                        prev->next=next->next;
    6.74 -                        cch_free(hcb,next);
    6.75 -                        hash_table->len--;
    6.76 +                        next->etag = 1UL<<63;
    6.77                          break;
    6.78                      }
    6.79                      prev=next;
    6.80 @@ -300,16 +277,14 @@ static void vhpt_purge(thash_cb_t *hcb, 
    6.81          hash_table = (thash_data_t *)ia64_thash(start);
    6.82          tag = ia64_ttag(start);
    6.83          if(hash_table->etag == tag ){
    6.84 -            __rem_hash_head(hcb, hash_table);
    6.85 +            hash_table->etag = 1UL<<63; 
    6.86          }
    6.87          else{
    6.88              prev=hash_table;
    6.89              next=prev->next;
    6.90              while(next){
    6.91                  if(next->etag == tag){
    6.92 -                    prev->next=next->next;
    6.93 -                    cch_free(hcb,next);
    6.94 -                    hash_table->len--;
    6.95 +                    next->etag = 1UL<<63;
    6.96                      break; 
    6.97                  }
    6.98                  prev=next;
    6.99 @@ -383,7 +358,6 @@ void vtlb_insert(thash_cb_t *hcb, u64 pt
   6.100          hash_table->page_flags = pte;
   6.101          hash_table->itir=itir;
   6.102          hash_table->etag=tag;
   6.103 -        hash_table->next = 0;
   6.104          return;
   6.105      }
   6.106      if (hash_table->len>=MAX_CCN_DEPTH){
   6.107 @@ -539,7 +513,6 @@ void thash_purge_all(VCPU *v)
   6.108          num--;
   6.109      }while(num);
   6.110      cch_mem_init(vhpt);
   6.111 -
   6.112      local_flush_tlb_all();
   6.113  }
   6.114  
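Note: instead of unlinking and freeing collision-chain entries, vtlb_purge() and vhpt_purge() now stamp the entry's tag with 1UL<<63. In the long-format VHPT tag this is the architected ti (tag-invalid) bit, and ia64_ttag() never produces a tag with that bit set, so a stamped entry can no longer match any lookup. A small sketch of that reasoning (the struct is a stand-in, not the real thash_data_t):

#include <stdint.h>
#include <stdio.h>

#define TAG_TI_BIT (1UL << 63)       /* architected tag-invalid bit */

struct entry { uint64_t etag; };     /* stand-in for thash_data_t */

static int entry_matches(const struct entry *e, uint64_t tag)
{
    /* tag comes from ia64_ttag(), which never sets bit 63 */
    return e->etag == tag;
}

int main(void)
{
    struct entry e = { .etag = 0x1234 };
    uint64_t tag = 0x1234;

    printf("before purge: match=%d\n", entry_matches(&e, tag));
    e.etag = TAG_TI_BIT;             /* what vtlb_purge()/vhpt_purge() now do */
    printf("after purge:  match=%d\n", entry_matches(&e, tag));
    return 0;
}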
     7.1 --- a/xen/arch/ia64/xen/xentime.c	Thu Jun 08 10:17:22 2006 -0600
     7.2 +++ b/xen/arch/ia64/xen/xentime.c	Thu Jun 08 11:00:09 2006 -0600
     7.3 @@ -124,7 +124,7 @@ xen_timer_interrupt (int irq, void *dev_
     7.4  #endif
     7.5  #endif
     7.6  
     7.7 -	if (!is_idle_domain(current->domain))
     7.8 +	if (!is_idle_domain(current->domain)&&!VMX_DOMAIN(current))
     7.9  		if (vcpu_timer_expired(current)) {
    7.10  			vcpu_pend_timer(current);
    7.11  			// ensure another timer interrupt happens even if domain doesn't
     8.1 --- a/xen/include/asm-ia64/vmx.h	Thu Jun 08 10:17:22 2006 -0600
     8.2 +++ b/xen/include/asm-ia64/vmx.h	Thu Jun 08 11:00:09 2006 -0600
     8.3 @@ -25,6 +25,8 @@
     8.4  #define RR7_SWITCH_SHIFT	12	/* 4k enough */
     8.5  #include <public/hvm/ioreq.h>
     8.6  
     8.7 +#define VCPU_LID(v) (((u64)(v)->vcpu_id)<<24)
     8.8 +
     8.9  extern void identify_vmx_feature(void);
    8.10  extern unsigned int vmx_enabled;
    8.11  extern void vmx_init_env(void);
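Note: VCPU_LID(v) derives a guest cr.lid value directly from the vcpu id: the id field (bits 31:24) becomes vcpu_id and the eid field (bits 23:16) stays zero, which is exactly what lid_2_vcpu() in mmio.c matches when routing an IPI. A standalone illustration; the LID union is a stand-in for the real ia64 definition, and the macro here takes the raw vcpu_id rather than a struct vcpu pointer:

#include <stdint.h>
#include <stdio.h>

#define VCPU_LID(vcpu_id) (((uint64_t)(vcpu_id)) << 24)

typedef union {                      /* illustrative stand-in for LID */
    uint64_t val;
    struct {
        uint64_t rv  : 16;
        uint64_t eid : 8;            /* bits 23:16 */
        uint64_t id  : 8;            /* bits 31:24 */
        uint64_t ig  : 32;
    } f;
} lid_t;

int main(void)
{
    lid_t lid = { .val = VCPU_LID(2) };   /* vcpu_id == 2 */

    printf("lid=0x%016lx  id=%lu  eid=%lu\n",
           (unsigned long)lid.val,
           (unsigned long)lid.f.id, (unsigned long)lid.f.eid);
    return 0;
}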