ia64/xen-unstable

changeset 15858:1c392e192379

[IA64] Cleanup within vti code

Makes more functions/variables static.
Indentation changes to make the code easier to read.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Mon Sep 10 13:52:19 2007 -0600 (2007-09-10)
parents f16bff0934d7
children 7d9b20d91102
files xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/vmx_phy_mode.c xen/include/asm-ia64/vmx_phy_mode.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/mmio.c	Fri Sep 07 13:56:50 2007 -0600
     1.2 +++ b/xen/arch/ia64/vmx/mmio.c	Mon Sep 10 13:52:19 2007 -0600
     1.3 @@ -1,4 +1,3 @@
     1.4 -
     1.5  /* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     1.6  /*
     1.7   * mmio.c: MMIO emulation components.
     1.8 @@ -53,7 +52,7 @@ static struct hvm_buffered_io_range
     1.9      &buffered_stdvga_range
    1.10  };
    1.11  
    1.12 -int hvm_buffered_io_intercept(ioreq_t *p)
    1.13 +static int hvm_buffered_io_intercept(ioreq_t *p)
    1.14  {
    1.15      struct vcpu *v = current;
    1.16      spinlock_t  *buffered_io_lock;
    1.17 @@ -119,26 +118,26 @@ static void low_mmio_access(VCPU *vcpu, 
    1.18      p->size = s;
    1.19      p->count = 1;
    1.20      p->dir = dir;
    1.21 -    if(dir==IOREQ_WRITE)     //write;
    1.22 +    if (dir==IOREQ_WRITE)     // write;
    1.23          p->data = *val;
    1.24      p->data_is_ptr = 0;
    1.25      p->type = 1;
    1.26      p->df = 0;
    1.27  
    1.28      p->io_count++;
    1.29 -    if(hvm_buffered_io_intercept(p)){
    1.30 +    if (hvm_buffered_io_intercept(p)) {
    1.31          p->state = STATE_IORESP_READY;
    1.32          vmx_io_assist(v);
    1.33 -        return ;
    1.34 -    }else 
    1.35 -    vmx_send_assist_req(v);
    1.36 -    if(dir==IOREQ_READ){ //read
    1.37 -        *val=p->data;
    1.38 +        return;
    1.39 +    } else 
    1.40 +        vmx_send_assist_req(v);
    1.41 +    if (dir == IOREQ_READ) { // read
    1.42 +        *val = p->data;
    1.43      }
    1.44      return;
    1.45  }
    1.46  
    1.47 -int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
    1.48 +static int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
    1.49  {
    1.50      struct buffered_piopage *pio_page =
    1.51          (void *)(current->domain->arch.hvm_domain.buffered_pio_va);
    1.52 @@ -146,11 +145,11 @@ int vmx_ide_pio_intercept(ioreq_t *p, u6
    1.53      uint32_t pointer, page_offset;
    1.54  
    1.55      if (p->addr == 0x1F0)
    1.56 -	piobuf = &pio_page->pio[PIO_BUFFER_IDE_PRIMARY];
    1.57 +        piobuf = &pio_page->pio[PIO_BUFFER_IDE_PRIMARY];
    1.58      else if (p->addr == 0x170)
    1.59 -	piobuf = &pio_page->pio[PIO_BUFFER_IDE_SECONDARY];
    1.60 +        piobuf = &pio_page->pio[PIO_BUFFER_IDE_SECONDARY];
    1.61      else
    1.62 -	return 0;
    1.63 +        return 0;
    1.64  
    1.65      if (p->size != 2 && p->size != 4)
    1.66          return 0;
    1.67 @@ -160,9 +159,9 @@ int vmx_ide_pio_intercept(ioreq_t *p, u6
    1.68  
    1.69      /* sanity check */
    1.70      if (page_offset + pointer < offsetof(struct buffered_piopage, buffer))
    1.71 -	return 0;
    1.72 +        return 0;
    1.73      if (page_offset + piobuf->data_end > PAGE_SIZE)
    1.74 -	return 0;
    1.75 +        return 0;
    1.76  
    1.77      if (pointer + p->size < piobuf->data_end) {
    1.78          uint8_t *bufp = (uint8_t *)pio_page + page_offset + pointer;
    1.79 @@ -189,7 +188,7 @@ int vmx_ide_pio_intercept(ioreq_t *p, u6
    1.80  
    1.81  #define TO_LEGACY_IO(pa)  (((pa)>>12<<2)|((pa)&0x3))
    1.82  
    1.83 -const char * guest_os_name[] = {
    1.84 +static const char * const guest_os_name[] = {
    1.85      "Unknown",
    1.86      "Windows 2003 server",
    1.87      "Linux",
    1.88 @@ -204,7 +203,7 @@ static inline void set_os_type(VCPU *v, 
    1.89          if (GOS_WINDOWS(v)) {
    1.90              struct xen_ia64_opt_feature optf;
    1.91  
    1.92 -	    /* Windows identity maps regions 4 & 5 */
    1.93 +            /* Windows identity maps regions 4 & 5 */
    1.94              optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG4;
    1.95              optf.on = XEN_IA64_OPTF_ON;
    1.96              optf.pgprot = (_PAGE_P|_PAGE_A|_PAGE_D|_PAGE_MA_WB|_PAGE_AR_RW);
    1.97 @@ -234,7 +233,7 @@ static void legacy_io_access(VCPU *vcpu,
    1.98      p->size = s;
    1.99      p->count = 1;
   1.100      p->dir = dir;
   1.101 -    if(dir==IOREQ_WRITE)     //write;
   1.102 +    if (dir == IOREQ_WRITE)     // write;
   1.103          p->data = *val;
   1.104      p->data_is_ptr = 0;
   1.105      p->type = 0;
   1.106 @@ -251,18 +250,18 @@ static void legacy_io_access(VCPU *vcpu,
   1.107          return;
   1.108  
   1.109      if (IS_ACPI_ADDR(p->addr) && vacpi_intercept(p, val))
   1.110 -	return;
   1.111 +        return;
   1.112  
   1.113      vmx_send_assist_req(v);
   1.114 -    if(dir==IOREQ_READ){ //read
   1.115 +    if (dir == IOREQ_READ) { // read
   1.116          *val=p->data;
   1.117      }
   1.118  #ifdef DEBUG_PCI
   1.119 -    if(dir==IOREQ_WRITE)
   1.120 -        if(p->addr == 0xcf8UL)
   1.121 +    if (dir == IOREQ_WRITE)
   1.122 +        if (p->addr == 0xcf8UL)
   1.123              printk("Write 0xcf8, with val [0x%lx]\n", p->data);
   1.124      else
   1.125 -        if(p->addr == 0xcfcUL)
   1.126 +        if (p->addr == 0xcfcUL)
   1.127              printk("Read 0xcfc, with val [0x%lx]\n", p->data);
   1.128  #endif //DEBUG_PCI
   1.129      return;
   1.130 @@ -270,9 +269,8 @@ static void legacy_io_access(VCPU *vcpu,
   1.131  
   1.132  static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
   1.133  {
   1.134 -    //mmio_type_t iot;
   1.135      unsigned long iot;
   1.136 -    iot=__gpfn_is_io(vcpu->domain, src_pa>>PAGE_SHIFT);
   1.137 +    iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);
   1.138  
   1.139      perfc_incra(vmx_mmio_access, iot >> 56);
   1.140      switch (iot) {
   1.141 @@ -288,11 +286,11 @@ static void mmio_access(VCPU *vcpu, u64 
   1.142      case GPFN_GFW:
   1.143          break;
   1.144      case GPFN_IOSAPIC:
   1.145 -	if (!dir)
   1.146 -	    viosapic_write(vcpu, src_pa, s, *dest);
   1.147 -	else
   1.148 -	    *dest = viosapic_read(vcpu, src_pa, s);
   1.149 -	break;
   1.150 +        if (!dir)
   1.151 +            viosapic_write(vcpu, src_pa, s, *dest);
   1.152 +        else
   1.153 +            *dest = viosapic_read(vcpu, src_pa, s);
   1.154 +        break;
   1.155      case GPFN_FRAME_BUFFER:
   1.156      case GPFN_LOW_MMIO:
   1.157          low_mmio_access(vcpu, src_pa, dest, s, dir);
   1.158 @@ -322,67 +320,68 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
   1.159      size_t size;
   1.160      u64 data, post_update, slot1a, slot1b, temp;
   1.161      INST64 inst;
   1.162 -    regs=vcpu_regs(vcpu);
   1.163 +
   1.164 +    regs = vcpu_regs(vcpu);
   1.165      if (IA64_RETRY == __vmx_get_domain_bundle(regs->cr_iip, &bundle)) {
   1.166          /* if fetch code fail, return and try again */
   1.167          return;
   1.168      }
   1.169      slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
   1.170 -    if (!slot) inst.inst = bundle.slot0;
   1.171 +    if (!slot)
   1.172 +        inst.inst = bundle.slot0;
   1.173      else if (slot == 1){
   1.174 -        slot1a=bundle.slot1a;
   1.175 -        slot1b=bundle.slot1b;
   1.176 -        inst.inst =slot1a + (slot1b<<18);
   1.177 +        slot1a = bundle.slot1a;
   1.178 +        slot1b = bundle.slot1b;
   1.179 +        inst.inst = slot1a + (slot1b << 18);
   1.180      }
   1.181 -    else if (slot == 2) inst.inst = bundle.slot2;
   1.182 +    else if (slot == 2)
   1.183 +        inst.inst = bundle.slot2;
   1.184  
   1.185  
   1.186      // Integer Load/Store
   1.187 -    if(inst.M1.major==4&&inst.M1.m==0&&inst.M1.x==0){
   1.188 +    if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
   1.189          inst_type = SL_INTEGER;  //
   1.190 -        size=(inst.M1.x6&0x3);
   1.191 -        if((inst.M1.x6>>2)>0xb){      // write
   1.192 -            dir=IOREQ_WRITE;     //write
   1.193 -            vcpu_get_gr_nat(vcpu,inst.M4.r2,&data);
   1.194 -        }else if((inst.M1.x6>>2)<0xb){   //  read
   1.195 -            dir=IOREQ_READ;
   1.196 +        size = (inst.M1.x6 & 0x3);
   1.197 +        if ((inst.M1.x6 >> 2) > 0xb) {
   1.198 +            dir = IOREQ_WRITE;     // write
   1.199 +            vcpu_get_gr_nat(vcpu, inst.M4.r2, &data);
   1.200 +        } else if ((inst.M1.x6 >> 2) < 0xb) {   // read
   1.201 +            dir = IOREQ_READ;
   1.202          }
   1.203      }
   1.204      // Integer Load + Reg update
   1.205 -    else if(inst.M2.major==4&&inst.M2.m==1&&inst.M2.x==0){
   1.206 +    else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
   1.207          inst_type = SL_INTEGER;
   1.208          dir = IOREQ_READ;     //write
   1.209 -        size = (inst.M2.x6&0x3);
   1.210 -        vcpu_get_gr_nat(vcpu,inst.M2.r3,&temp);
   1.211 -        vcpu_get_gr_nat(vcpu,inst.M2.r2,&post_update);
   1.212 +        size = (inst.M2.x6 & 0x3);
   1.213 +        vcpu_get_gr_nat(vcpu, inst.M2.r3, &temp);
   1.214 +        vcpu_get_gr_nat(vcpu, inst.M2.r2, &post_update);
   1.215          temp += post_update;
   1.216 -        vcpu_set_gr(vcpu,inst.M2.r3,temp,0);
   1.217 +        vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
   1.218      }
   1.219      // Integer Load/Store + Imm update
   1.220 -    else if(inst.M3.major==5){
   1.221 +    else if (inst.M3.major == 5) {
   1.222          inst_type = SL_INTEGER;  //
   1.223 -        size=(inst.M3.x6&0x3);
   1.224 -        if((inst.M5.x6>>2)>0xb){      // write
   1.225 -            dir=IOREQ_WRITE;     //write
   1.226 -            vcpu_get_gr_nat(vcpu,inst.M5.r2,&data);
   1.227 -            vcpu_get_gr_nat(vcpu,inst.M5.r3,&temp);
   1.228 -            post_update = (inst.M5.i<<7)+inst.M5.imm7;
   1.229 -            if(inst.M5.s)
   1.230 +        size = (inst.M3.x6 & 0x3);
   1.231 +        if ((inst.M5.x6 >> 2) > 0xb) {      // write
   1.232 +            dir = IOREQ_WRITE;     // write
   1.233 +            vcpu_get_gr_nat(vcpu, inst.M5.r2, &data);
   1.234 +            vcpu_get_gr_nat(vcpu, inst.M5.r3, &temp);
   1.235 +            post_update = (inst.M5.i << 7) + inst.M5.imm7;
   1.236 +            if (inst.M5.s)
   1.237                  temp -= post_update;
   1.238              else
   1.239                  temp += post_update;
   1.240 -            vcpu_set_gr(vcpu,inst.M5.r3,temp,0);
   1.241 -
   1.242 -        }else if((inst.M3.x6>>2)<0xb){   //  read
   1.243 -            dir=IOREQ_READ;
   1.244 -            vcpu_get_gr_nat(vcpu,inst.M3.r3,&temp);
   1.245 -            post_update = (inst.M3.i<<7)+inst.M3.imm7;
   1.246 -            if(inst.M3.s)
   1.247 +            vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
   1.248 +        } else if ((inst.M3.x6 >> 2) < 0xb) {   // read
   1.249 +            dir = IOREQ_READ;
   1.250 +            vcpu_get_gr_nat(vcpu, inst.M3.r3, &temp);
   1.251 +            post_update = (inst.M3.i << 7) + inst.M3.imm7;
   1.252 +            if (inst.M3.s)
   1.253                  temp -= post_update;
   1.254              else
   1.255                  temp += post_update;
   1.256 -            vcpu_set_gr(vcpu,inst.M3.r3,temp,0);
   1.257 -
   1.258 +            vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
   1.259          }
   1.260      }
   1.261      // Floating-point spill
   1.262 @@ -401,41 +400,43 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
   1.263          size = 3;
   1.264      }
   1.265      // Floating-point spill + Imm update
   1.266 -    else if(inst.M10.major==7&&inst.M10.x6==0x3B){
   1.267 +    else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
   1.268          struct ia64_fpreg v;
   1.269 -	inst_type=SL_FLOATING;
   1.270 -	dir=IOREQ_WRITE;
   1.271 -	vcpu_get_fpreg(vcpu,inst.M10.f2,&v);
   1.272 -	vcpu_get_gr_nat(vcpu,inst.M10.r3,&temp);
   1.273 -	post_update = (inst.M10.i<<7)+inst.M10.imm7;
   1.274 -	if(inst.M10.s)
   1.275 +
   1.276 +        inst_type = SL_FLOATING;
   1.277 +        dir = IOREQ_WRITE;
   1.278 +        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
   1.279 +        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
   1.280 +        post_update = (inst.M10.i << 7) + inst.M10.imm7;
   1.281 +        if (inst.M10.s)
   1.282              temp -= post_update;
   1.283 -	else
   1.284 +        else
   1.285              temp += post_update;
   1.286 -	vcpu_set_gr(vcpu,inst.M10.r3,temp,0);
   1.287 +        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
   1.288  
   1.289 -	/* Write high word.
   1.290 -	   FIXME: this is a kludge!  */
   1.291 -	v.u.bits[1] &= 0x3ffff;
   1.292 -	mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
   1.293 -	data = v.u.bits[0];
   1.294 -	size = 3;
   1.295 +        /* Write high word.
   1.296 +           FIXME: this is a kludge!  */
   1.297 +        v.u.bits[1] &= 0x3ffff;
   1.298 +        mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
   1.299 +        data = v.u.bits[0];
   1.300 +        size = 3;
   1.301      }
   1.302      // Floating-point stf8 + Imm update
   1.303 -    else if(inst.M10.major==7&&inst.M10.x6==0x31){
   1.304 +    else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
   1.305          struct ia64_fpreg v;
   1.306 -	inst_type=SL_FLOATING;
   1.307 -	dir=IOREQ_WRITE;
   1.308 -	size=3;
   1.309 -	vcpu_get_fpreg(vcpu,inst.M10.f2,&v);
   1.310 -	data = v.u.bits[0]; /* Significand.  */
   1.311 -	vcpu_get_gr_nat(vcpu,inst.M10.r3,&temp);
   1.312 -	post_update = (inst.M10.i<<7)+inst.M10.imm7;
   1.313 -	if(inst.M10.s)
   1.314 +
   1.315 +        inst_type = SL_FLOATING;
   1.316 +        dir = IOREQ_WRITE;
   1.317 +        size = 3;
   1.318 +        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
   1.319 +        data = v.u.bits[0]; /* Significand.  */
   1.320 +        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
   1.321 +        post_update = (inst.M10.i << 7) + inst.M10.imm7;
   1.322 +        if (inst.M10.s)
   1.323              temp -= post_update;
   1.324 -	else
   1.325 +        else
   1.326              temp += post_update;
   1.327 -	vcpu_set_gr(vcpu,inst.M10.r3,temp,0);
   1.328 +        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
   1.329      }
   1.330  //    else if(inst.M6.major==6&&inst.M6.m==0&&inst.M6.x==0&&inst.M6.x6==3){
   1.331  //        inst_type=SL_FLOATING;  //fp
   1.332 @@ -443,71 +444,53 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
   1.333  //        size=3;     //ldfd
   1.334  //    }
   1.335      //  lfetch - do not perform accesses.
   1.336 -    else if(inst.M15.major==7&&inst.M15.x6>=0x2c&&inst.M15.x6<=0x2f){
   1.337 -	vcpu_get_gr_nat(vcpu,inst.M15.r3,&temp);
   1.338 -	post_update = (inst.M15.i<<7)+inst.M15.imm7;
   1.339 -	if(inst.M15.s)
    1.340 +    else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c && inst.M15.x6 <= 0x2f) {
   1.341 +        vcpu_get_gr_nat(vcpu, inst.M15.r3, &temp);
   1.342 +        post_update = (inst.M15.i << 7) + inst.M15.imm7;
   1.343 +        if (inst.M15.s)
   1.344              temp -= post_update;
   1.345 -	else
   1.346 +        else
   1.347              temp += post_update;
   1.348 -	vcpu_set_gr(vcpu,inst.M15.r3,temp,0);
   1.349 +        vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
   1.350  
   1.351 -	vcpu_increment_iip(vcpu);
   1.352 -	return;
   1.353 -    }
   1.354 -    // Floating-point Load Pair + Imm ldfp8 M12
   1.355 -    else if(inst.M12.major==6&&inst.M12.m==1&&inst.M12.x==1&&inst.M12.x6==1){
   1.356 -        struct ia64_fpreg v;
   1.357 -        inst_type=SL_FLOATING;
   1.358 -        dir = IOREQ_READ;
   1.359 -        size = 8;     //ldfd
   1.360 -        mmio_access(vcpu, padr, &data, size, ma, dir);
   1.361 -        v.u.bits[0]=data;
   1.362 -        v.u.bits[1]=0x1003E;
   1.363 -        vcpu_set_fpreg(vcpu,inst.M12.f1,&v);
   1.364 -        padr += 8;
   1.365 -        mmio_access(vcpu, padr, &data, size, ma, dir);
   1.366 -        v.u.bits[0]=data;
   1.367 -        v.u.bits[1]=0x1003E;
   1.368 -        vcpu_set_fpreg(vcpu,inst.M12.f2,&v);
   1.369 -        padr += 8;
   1.370 -        vcpu_set_gr(vcpu,inst.M12.r3,padr,0);
   1.371          vcpu_increment_iip(vcpu);
   1.372          return;
   1.373      }
   1.374 -    else{
   1.375 +    // Floating-point Load Pair + Imm ldfp8 M12
   1.376 +    else if (inst.M12.major == 6 && inst.M12.m == 1
   1.377 +             && inst.M12.x == 1 && inst.M12.x6 == 1) {
   1.378 +        struct ia64_fpreg v;
   1.379 +
   1.380 +        inst_type = SL_FLOATING;
   1.381 +        dir = IOREQ_READ;
   1.382 +        size = 8;     //ldfd
   1.383 +        mmio_access(vcpu, padr, &data, size, ma, dir);
   1.384 +        v.u.bits[0] = data;
   1.385 +        v.u.bits[1] = 0x1003E;
   1.386 +        vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
   1.387 +        padr += 8;
   1.388 +        mmio_access(vcpu, padr, &data, size, ma, dir);
   1.389 +        v.u.bits[0] = data;
   1.390 +        v.u.bits[1] = 0x1003E;
   1.391 +        vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
   1.392 +        padr += 8;
    1.393 +        vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
   1.394 +        vcpu_increment_iip(vcpu);
   1.395 +        return;
   1.396 +    }
   1.397 +    else {
   1.398          panic_domain
   1.399 -	  (NULL,"This memory access instr can't be emulated: %lx pc=%lx\n ",
   1.400 -	   inst.inst, regs->cr_iip);
   1.401 +            (NULL, "This memory access instr can't be emulated: %lx pc=%lx\n",
   1.402 +             inst.inst, regs->cr_iip);
   1.403      }
   1.404  
   1.405      size = 1 << size;
   1.406 -    if(dir==IOREQ_WRITE){
   1.407 -        mmio_access(vcpu, padr, &data, size, ma, dir);
   1.408 -    }else{
   1.409 -        mmio_access(vcpu, padr, &data, size, ma, dir);
   1.410 -        if(inst_type==SL_INTEGER){       //gp
   1.411 -            vcpu_set_gr(vcpu,inst.M1.r1,data,0);
   1.412 -        }else{
   1.413 +    mmio_access(vcpu, padr, &data, size, ma, dir);
   1.414 +    if (dir == IOREQ_READ) {
   1.415 +        if (inst_type == SL_INTEGER) {
   1.416 +            vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
   1.417 +        } else {
   1.418              panic_domain(NULL, "Don't support ldfd now !");
   1.419 -/*            switch(inst.M6.f1){
   1.420 -
   1.421 -            case 6:
   1.422 -                regs->f6=(struct ia64_fpreg)data;
   1.423 -            case 7:
   1.424 -                regs->f7=(struct ia64_fpreg)data;
   1.425 -            case 8:
   1.426 -                regs->f8=(struct ia64_fpreg)data;
   1.427 -            case 9:
   1.428 -                regs->f9=(struct ia64_fpreg)data;
   1.429 -            case 10:
   1.430 -                regs->f10=(struct ia64_fpreg)data;
   1.431 -            case 11:
   1.432 -                regs->f11=(struct ia64_fpreg)data;
   1.433 -            default :
   1.434 -                ia64_ldfs(inst.M6.f1,&data);
   1.435 -            }
   1.436 -*/
   1.437          }
   1.438      }
   1.439      vcpu_increment_iip(vcpu);
     2.1 --- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Fri Sep 07 13:56:50 2007 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Mon Sep 10 13:52:19 2007 -0600
     2.3 @@ -28,7 +28,8 @@
     2.4  #include <xen/sched.h>
     2.5  #include <asm/pgtable.h>
     2.6  #include <asm/vmmu.h>
     2.7 -int valid_mm_mode[8] = {
     2.8 +
     2.9 +static const int valid_mm_mode[8] = {
    2.10      GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
    2.11      INV_MODE,
    2.12      INV_MODE,
    2.13 @@ -47,7 +48,7 @@ int valid_mm_mode[8] = {
    2.14   * - If gva happens to be rr0 and rr4, only allowed case is identity
    2.15   *   mapping (gva=gpa), or panic! (How?)
    2.16   */
    2.17 -int mm_switch_table[8][8] = {
    2.18 +static const int mm_switch_table[8][8] = {
    2.19      /*  2004/09/12(Kevin): Allow switch to self */
    2.20          /*
    2.21           *  (it,dt,rt): (0,0,0) -> (1,1,1)
    2.22 @@ -329,7 +330,7 @@ void
    2.23  prepare_if_physical_mode(VCPU *vcpu)
    2.24  {
    2.25      if (is_physical_mode(vcpu)) {
    2.26 -	vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
    2.27 +        vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
    2.28          switch_to_virtual_rid(vcpu);
    2.29      }
    2.30      return;
     3.1 --- a/xen/include/asm-ia64/vmx_phy_mode.h	Fri Sep 07 13:56:50 2007 -0600
     3.2 +++ b/xen/include/asm-ia64/vmx_phy_mode.h	Mon Sep 10 13:52:19 2007 -0600
     3.3 @@ -84,8 +84,6 @@
     3.4  #define XEN_RR7_RID    (0xf00010)
     3.5  #define GUEST_IN_PHY    0x1
     3.6  #define GUEST_PHY_EMUL	0x2
     3.7 -extern int valid_mm_mode[];
     3.8 -extern int mm_switch_table[][8];
     3.9  extern void physical_mode_init(VCPU *);
    3.10  extern void switch_to_physical_rid(VCPU *);
    3.11  extern void switch_to_virtual_rid(VCPU *vcpu);