ia64/xen-unstable

changeset 6614:0c0d929e787c

Attached are the patches for the new ioemu communication mechanism. The new
mechanism provides richer I/O operation semantics, such as AND, OR, and XOR
operations on MMIO space. This is necessary for operating systems such
as Windows XP and Windows 2003.
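
For illustration, the new AND/OR/XOR request types are handled as
read-modify-write operations on guest physical memory: the device model reads
the current value at the request's address, applies the operator with the
operand supplied by the guest, writes the result back, and returns the
original value. The sketch below shows that pattern in isolation; the
struct and names (sketch_ioreq, mmio_cell) are simplified stand-ins for the
real ioreq_t and MMIO access paths in the Xen headers, not the actual
interface.

    /* Minimal sketch of the read-modify-write semantics behind the new
     * AND/OR/XOR ioreq types. The struct is a stand-in for the real
     * ioreq_t; a plain variable stands in for the MMIO location. */
    #include <stdio.h>

    #define SKETCH_IOREQ_READ   0
    #define SKETCH_IOREQ_WRITE  1

    struct sketch_ioreq {
        int           dir;   /* SKETCH_IOREQ_READ or SKETCH_IOREQ_WRITE */
        unsigned long data;  /* operand supplied by the guest */
    };

    static unsigned long mmio_cell = 0xF0;   /* "guest MMIO" location */

    /* AND variant: returns the old value, as cpu_ioreq_and() does via
     * req->u.data in the real patch. */
    static unsigned long sketch_ioreq_and(struct sketch_ioreq *req)
    {
        unsigned long old = mmio_cell;

        if (req->dir == SKETCH_IOREQ_WRITE)
            mmio_cell = old & req->data;
        return old;
    }

    int main(void)
    {
        struct sketch_ioreq req = { SKETCH_IOREQ_WRITE, 0x0F };
        unsigned long old = sketch_ioreq_and(&req);

        printf("old=%#lx new=%#lx\n", old, mmio_cell); /* old=0xf0 new=0 */
        return 0;
    }

The OR and XOR cases in the patch follow the same shape, differing only in
the operator applied before the write-back.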

This is the first part of a two-part patch; this part applies to ioemu.

Signed-Off-By: Leendert van Doorn <leendert@watson.ibm.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Sep 02 17:55:24 2005 +0000 (2005-09-02)
parents 0746ef61733b
children a1de77c1486c
files tools/ioemu/exec.c tools/ioemu/target-i386-dm/helper2.c tools/ioemu/vl.c
line diff
     1.1 --- a/tools/ioemu/exec.c	Fri Sep 02 17:54:53 2005 +0000
     1.2 +++ b/tools/ioemu/exec.c	Fri Sep 02 17:55:24 2005 +0000
     1.3 @@ -142,6 +142,10 @@ void cpu_set_log(int log_flags)
     1.4  #else
     1.5          setvbuf(logfile, NULL, _IOLBF, 0);
     1.6  #endif
     1.7 +/*
     1.8 +    	stdout = logfile;
     1.9 +    	stderr = logfile;
    1.10 +*/
    1.11      }
    1.12  }
    1.13  
    1.14 @@ -386,9 +390,6 @@ void cpu_physical_memory_rw(target_phys_
    1.15                      io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    1.16                      l = 2;
    1.17                  } else {
    1.18 -                    if (l!=1){
    1.19 -                        fprintf(logfile, "ERROR 8 bit mmio\n");
    1.20 -                    }
    1.21                      /* 8 bit access */
    1.22                      val = ldub_raw(buf);
    1.23                      io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
     2.1 --- a/tools/ioemu/target-i386-dm/helper2.c	Fri Sep 02 17:54:53 2005 +0000
     2.2 +++ b/tools/ioemu/target-i386-dm/helper2.c	Fri Sep 02 17:55:24 2005 +0000
     2.3 @@ -169,133 +169,217 @@ ioreq_t* cpu_get_ioreq(void)
     2.4  unsigned long
     2.5  do_inp(CPUState *env, unsigned long addr, unsigned long size)
     2.6  {
     2.7 -  switch(size) {
     2.8 -      case 1:
     2.9 -        return cpu_inb(env, addr);
    2.10 -      case 2:
    2.11 -        return cpu_inw(env, addr);
    2.12 -      case 4:
    2.13 -        return cpu_inl(env, addr);
    2.14 -      default:
    2.15 -	fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
    2.16 -        exit(-1);
    2.17 -  }
    2.18 +	switch(size) {
    2.19 +	case 1:
    2.20 +		return cpu_inb(env, addr);
    2.21 +	case 2:
    2.22 +		return cpu_inw(env, addr);
    2.23 +	case 4:
    2.24 +		return cpu_inl(env, addr);
    2.25 +	default:
    2.26 +		fprintf(logfile, "inp: bad size: %lx %lx\n", addr, size);
    2.27 +		exit(-1);
    2.28 +	}
    2.29  }
    2.30  
    2.31  void
    2.32  do_outp(CPUState *env, unsigned long addr, unsigned long size, 
    2.33          unsigned long val)
    2.34  {
    2.35 -  switch(size) {
    2.36 -      case 1:
    2.37 -        return cpu_outb(env, addr, val);
    2.38 -      case 2:
    2.39 -        return cpu_outw(env, addr, val);
    2.40 -      case 4:
    2.41 -        return cpu_outl(env, addr, val);
    2.42 -      default:
    2.43 -	fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
    2.44 -        exit(-1);
    2.45 -  }
    2.46 +	switch(size) {
    2.47 +	case 1:
    2.48 +		return cpu_outb(env, addr, val);
    2.49 +	case 2:
    2.50 +		return cpu_outw(env, addr, val);
    2.51 +	case 4:
    2.52 +		return cpu_outl(env, addr, val);
    2.53 +	default:
    2.54 +		fprintf(logfile, "outp: bad size: %lx %lx\n", addr, size);
    2.55 +		exit(-1);
    2.56 +	}
    2.57  }
    2.58  
    2.59  extern void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, 
    2.60                                     int len, int is_write);
    2.61  
    2.62  static inline void
    2.63 -read_physical(target_phys_addr_t addr, unsigned long size, void *val)
    2.64 +read_physical(u64 addr, unsigned long size, void *val)
    2.65  {
    2.66 -        return cpu_physical_memory_rw(addr, val, size, 0);
    2.67 +        return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 0);
    2.68  }
    2.69  
    2.70  static inline void
    2.71 -write_physical(target_phys_addr_t addr, unsigned long size, void *val)
    2.72 +write_physical(u64 addr, unsigned long size, void *val)
    2.73  {
    2.74 -        return cpu_physical_memory_rw(addr, val, size, 1);
    2.75 +        return cpu_physical_memory_rw((target_phys_addr_t)addr, val, size, 1);
    2.76  }
    2.77  
    2.78 -//send the ioreq to device model
    2.79 -void cpu_dispatch_ioreq(CPUState *env, ioreq_t *req)
    2.80 +void
    2.81 +cpu_ioreq_pio(CPUState *env, ioreq_t *req)
    2.82  {
    2.83 -	int i;
    2.84 -	int sign;
    2.85 +	int i, sign;
    2.86  
    2.87 -	sign = (req->df) ? -1 : 1;
    2.88 +	sign = req->df ? -1 : 1;
    2.89  
    2.90 -	if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
    2.91 -		if (req->size != 4) {
    2.92 -			// Bochs expects higher bits to be 0
    2.93 -			req->u.data &= (1UL << (8 * req->size))-1;
    2.94 -		}
    2.95 -	}
    2.96 +	if (req->dir == IOREQ_READ) {
    2.97 +		if (!req->pdata_valid) {
    2.98 +			req->u.data = do_inp(env, req->addr, req->size);
    2.99 +		} else {
   2.100 +			unsigned long tmp; 
   2.101  
   2.102 -	if (req->port_mm == 0){//port io
   2.103 -		if(req->dir == IOREQ_READ){//read
   2.104 -			if (!req->pdata_valid) {
   2.105 -				req->u.data = do_inp(env, req->addr, req->size);
   2.106 -			} else {
   2.107 -				unsigned long tmp; 
   2.108 -
   2.109 -				for (i = 0; i < req->count; i++) {
   2.110 -					tmp = do_inp(env, req->addr, req->size);
   2.111 -					write_physical((target_phys_addr_t)req->u.pdata + (sign * i * req->size), 
   2.112 -						       req->size, &tmp);
   2.113 -				}
   2.114 +			for (i = 0; i < req->count; i++) {
   2.115 +				tmp = do_inp(env, req->addr, req->size);
   2.116 +				write_physical((target_phys_addr_t) req->u.pdata
   2.117 +						+ (sign * i * req->size), 
   2.118 +					req->size, &tmp);
   2.119  			}
   2.120 -		} else if(req->dir == IOREQ_WRITE) {
   2.121 -			if (!req->pdata_valid) {
   2.122 -				do_outp(env, req->addr, req->size, req->u.data);
   2.123 -			} else {
   2.124 -				for (i = 0; i < req->count; i++) {
   2.125 -					unsigned long tmp;
   2.126 -
   2.127 -					read_physical((target_phys_addr_t)req->u.pdata + (sign * i * req->size), req->size, 
   2.128 -						      &tmp);
   2.129 -					do_outp(env, req->addr, req->size, tmp);
   2.130 -				}
   2.131 -			}
   2.132 -			
   2.133  		}
   2.134 -	} else if (req->port_mm == 1){//memory map io
   2.135 +	} else if (req->dir == IOREQ_WRITE) {
   2.136  		if (!req->pdata_valid) {
   2.137 -			//handle stos
   2.138 -			if(req->dir == IOREQ_READ) { //read
   2.139 -				for (i = 0; i < req->count; i++) {
   2.140 -					read_physical((target_phys_addr_t)req->addr + (sign * i * req->size), req->size, &req->u.data);
   2.141 -				}
   2.142 -			} else if(req->dir == IOREQ_WRITE) { //write
   2.143 -				for (i = 0; i < req->count; i++) {
   2.144 -					write_physical((target_phys_addr_t)req->addr + (sign * i * req->size), req->size, &req->u.data);
   2.145 -				}
   2.146 -			}
   2.147 +			do_outp(env, req->addr, req->size, req->u.data);
   2.148  		} else {
   2.149 -			//handle movs
   2.150 -			unsigned long tmp;
   2.151 -			if (req->dir == IOREQ_READ) {
   2.152 -				for (i = 0; i < req->count; i++) {
   2.153 -					read_physical((target_phys_addr_t)req->addr + (sign * i * req->size), req->size, &tmp);
   2.154 -					write_physical((target_phys_addr_t)req->u.pdata + (sign * i * req->size), req->size, &tmp);
   2.155 -				}
   2.156 -			} else if (req->dir == IOREQ_WRITE) {
   2.157 -				for (i = 0; i < req->count; i++) {
   2.158 -					read_physical((target_phys_addr_t)req->u.pdata + (sign * i * req->size), req->size, &tmp);
   2.159 -					write_physical((target_phys_addr_t)req->addr + (sign * i * req->size), req->size, &tmp);
   2.160 -				}
   2.161 +			for (i = 0; i < req->count; i++) {
   2.162 +				unsigned long tmp;
   2.163 +
   2.164 +				read_physical((target_phys_addr_t) req->u.pdata
   2.165 +						+ (sign * i * req->size),
   2.166 +					req->size, &tmp);
   2.167 +				do_outp(env, req->addr, req->size, tmp);
   2.168  			}
   2.169  		}
   2.170  	}
   2.171 -        /* No state change if state = STATE_IORESP_HOOK */
   2.172 -        if (req->state == STATE_IOREQ_INPROCESS)
   2.173 -                req->state = STATE_IORESP_READY;
   2.174 -	env->send_event = 1;
   2.175 +}
   2.176 +
   2.177 +void
   2.178 +cpu_ioreq_move(CPUState *env, ioreq_t *req)
   2.179 +{
   2.180 +	int i, sign;
   2.181 +
   2.182 +	sign = req->df ? -1 : 1;
   2.183 +
   2.184 +	if (!req->pdata_valid) {
   2.185 +		if (req->dir == IOREQ_READ) {
   2.186 +			for (i = 0; i < req->count; i++) {
   2.187 +				read_physical(req->addr
   2.188 +						+ (sign * i * req->size),
   2.189 +					req->size, &req->u.data);
   2.190 +			}
   2.191 +		} else if (req->dir == IOREQ_WRITE) {
   2.192 +			for (i = 0; i < req->count; i++) {
   2.193 +				write_physical(req->addr
   2.194 +						+ (sign * i * req->size),
   2.195 +					req->size, &req->u.data);
   2.196 +			}
   2.197 +		}
   2.198 +	} else {
   2.199 +		unsigned long tmp;
   2.200 +
   2.201 +		if (req->dir == IOREQ_READ) {
   2.202 +			for (i = 0; i < req->count; i++) {
   2.203 +				read_physical(req->addr
   2.204 +						+ (sign * i * req->size),
   2.205 +					req->size, &tmp);
   2.206 +				write_physical((target_phys_addr_t )req->u.pdata
   2.207 +						+ (sign * i * req->size),
   2.208 +					req->size, &tmp);
   2.209 +			}
   2.210 +		} else if (req->dir == IOREQ_WRITE) {
   2.211 +			for (i = 0; i < req->count; i++) {
   2.212 +				read_physical((target_phys_addr_t) req->u.pdata
   2.213 +						+ (sign * i * req->size),
   2.214 +					req->size, &tmp);
   2.215 +				write_physical(req->addr
   2.216 +						+ (sign * i * req->size),
   2.217 +					req->size, &tmp);
   2.218 +			}
   2.219 +		}
   2.220 +	}
   2.221 +}
   2.222 +
   2.223 +void
   2.224 +cpu_ioreq_and(CPUState *env, ioreq_t *req)
   2.225 +{
   2.226 +	unsigned long tmp1, tmp2;
   2.227 +
   2.228 +	if (req->pdata_valid != 0)
   2.229 +		hw_error("expected scalar value");
   2.230 +
   2.231 +	read_physical(req->addr, req->size, &tmp1);
   2.232 +	if (req->dir == IOREQ_WRITE) {
   2.233 +		tmp2 = tmp1 & (unsigned long) req->u.data;
   2.234 +		write_physical(req->addr, req->size, &tmp2);
   2.235 +	}
   2.236 +	req->u.data = tmp1;
   2.237 +}
   2.238 +
   2.239 +void
   2.240 +cpu_ioreq_or(CPUState *env, ioreq_t *req)
   2.241 +{
   2.242 +	unsigned long tmp1, tmp2;
   2.243 +
   2.244 +	if (req->pdata_valid != 0)
   2.245 +		hw_error("expected scalar value");
   2.246 +
   2.247 +	read_physical(req->addr, req->size, &tmp1);
   2.248 +	if (req->dir == IOREQ_WRITE) {
   2.249 +		tmp2 = tmp1 | (unsigned long) req->u.data;
   2.250 +		write_physical(req->addr, req->size, &tmp2);
   2.251 +	}
   2.252 +	req->u.data = tmp1;
   2.253 +}
   2.254 +
   2.255 +void
   2.256 +cpu_ioreq_xor(CPUState *env, ioreq_t *req)
   2.257 +{
   2.258 +	unsigned long tmp1, tmp2;
   2.259 +
   2.260 +	if (req->pdata_valid != 0)
   2.261 +		hw_error("expected scalar value");
   2.262 +
   2.263 +	read_physical(req->addr, req->size, &tmp1);
   2.264 +	if (req->dir == IOREQ_WRITE) {
   2.265 +		tmp2 = tmp1 ^ (unsigned long) req->u.data;
   2.266 +		write_physical(req->addr, req->size, &tmp2);
   2.267 +	}
   2.268 +	req->u.data = tmp1;
   2.269  }
   2.270  
   2.271  void
   2.272  cpu_handle_ioreq(CPUState *env)
   2.273  {
   2.274  	ioreq_t *req = cpu_get_ioreq();
   2.275 -	if (req)
   2.276 -		cpu_dispatch_ioreq(env, req);
   2.277 +
   2.278 +	if (req) {
   2.279 +		if ((!req->pdata_valid) && (req->dir == IOREQ_WRITE)) {
   2.280 +			if (req->size != 4)
   2.281 +				req->u.data &= (1UL << (8 * req->size))-1;
   2.282 +		}
   2.283 +
   2.284 +		switch (req->type) {
   2.285 +		case IOREQ_TYPE_PIO:
   2.286 +			cpu_ioreq_pio(env, req);
   2.287 +			break;
   2.288 +		case IOREQ_TYPE_COPY:
   2.289 +			cpu_ioreq_move(env, req);
   2.290 +			break;
   2.291 +		case IOREQ_TYPE_AND:
   2.292 +			cpu_ioreq_and(env, req);
   2.293 +			break;
   2.294 +		case IOREQ_TYPE_OR:
   2.295 +			cpu_ioreq_or(env, req);
   2.296 +			break;
   2.297 +		case IOREQ_TYPE_XOR:
   2.298 +			cpu_ioreq_xor(env, req);
   2.299 +			break;
   2.300 +		default:
   2.301 +			hw_error("Invalid ioreq type 0x%x", req->type);
   2.302 +		}
   2.303 +
   2.304 +		/* No state change if state = STATE_IORESP_HOOK */
   2.305 +		if (req->state == STATE_IOREQ_INPROCESS)
   2.306 +			req->state = STATE_IORESP_READY;
   2.307 +		env->send_event = 1;
   2.308 +	}
   2.309  }
   2.310  
   2.311  void
   2.312 @@ -321,7 +405,7 @@ do_interrupt(CPUState *env, int vector)
   2.313  
   2.314  	// Send a message on the event channel. Add the vector to the shared mem
   2.315  	// page.
   2.316 -	intr = &(shared_page->sp_global.pic_intr[0]);
   2.317 +	intr = (unsigned long *) &(shared_page->sp_global.pic_intr[0]);
   2.318  	atomic_set_bit(vector, intr);
   2.319          if (loglevel & CPU_LOG_INT)
   2.320                  fprintf(logfile, "injecting vector: %x\n", vector);
     3.1 --- a/tools/ioemu/vl.c	Fri Sep 02 17:54:53 2005 +0000
     3.2 +++ b/tools/ioemu/vl.c	Fri Sep 02 17:55:24 2005 +0000
     3.3 @@ -413,6 +413,11 @@ void hw_error(const char *fmt, ...)
     3.4      fprintf(stderr, "qemu: hardware error: ");
     3.5      vfprintf(stderr, fmt, ap);
     3.6      fprintf(stderr, "\n");
     3.7 +    if (logfile) {
     3.8 +	fprintf(logfile, "qemu: hardware error: ");
     3.9 +	vfprintf(logfile, fmt, ap);
    3.10 +	fprintf(logfile, "\n");
    3.11 +    }
    3.12      va_end(ap);
    3.13      abort();
    3.14  }