ia64/xen-unstable

view tools/ioemu/hw/i8254.c @ 9137:eb24eb6bc341

Fix some warnings when compiling tools.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Mar 04 10:25:05 2006 +0100 (2006-03-04)
/*
 * QEMU 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "vl.h"
#include <xenctrl.h>
#include <xen/hvm/ioreq.h>

//#define DEBUG_PIT

#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4
typedef struct PITChannelState {
    int count; /* can be 65536 */
    uint16_t latched_count;
    uint8_t count_latched;
    uint8_t status_latched;
    uint8_t status;
    uint8_t read_state;
    uint8_t write_state;
    uint8_t write_latch;
    uint8_t rw_mode;
    uint8_t mode;
    uint8_t bcd; /* not supported */
    uint8_t gate; /* timer start */
    int64_t count_load_time;
    /* irq handling */
    int64_t next_transition_time;
    QEMUTimer *irq_timer;
    int irq;
    int hvm_channel; /* Is this accelerated by HVM? */
} PITChannelState;

struct PITState {
    PITChannelState channels[3];
};

static PITState pit_state;

static void pit_irq_timer_update(PITChannelState *s, int64_t current_time);

/* channel currently being programmed for HVM use */
int hvm_channel = -1;
extern FILE *logfile;
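
/* Return the current counter value, derived from the time elapsed since the
   count was loaded, scaled from vm_clock ticks to PIT input-clock ticks. */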
static int pit_get_count(PITChannelState *s)
{
    uint64_t d;
    int counter;

    d = muldiv64(qemu_get_clock(vm_clock) - s->count_load_time, PIT_FREQ, ticks_per_sec);
    switch(s->mode) {
    case 0:
    case 1:
    case 4:
    case 5:
        counter = (s->count - d) & 0xffff;
        break;
    case 3:
        /* XXX: may be incorrect for odd counts */
        counter = s->count - ((2 * d) % s->count);
        break;
    default:
        counter = s->count - (d % s->count);
        break;
    }
    return counter;
}

/* get pit output bit */
static int pit_get_out1(PITChannelState *s, int64_t current_time)
{
    uint64_t d;
    int out;

    d = muldiv64(current_time - s->count_load_time, PIT_FREQ, ticks_per_sec);
    switch(s->mode) {
    default:
    case 0:
        out = (d >= s->count);
        break;
    case 1:
        out = (d < s->count);
        break;
    case 2:
        if ((d % s->count) == 0 && d != 0)
            out = 1;
        else
            out = 0;
        break;
    case 3:
        out = (d % s->count) < ((s->count + 1) >> 1);
        break;
    case 4:
    case 5:
        out = (d == s->count);
        break;
    }
    return out;
}

int pit_get_out(PITState *pit, int channel, int64_t current_time)
{
    PITChannelState *s = &pit->channels[channel];
    return pit_get_out1(s, current_time);
}

/* return -1 if no transition will occur. */
static int64_t pit_get_next_transition_time(PITChannelState *s,
                                            int64_t current_time)
{
    uint64_t d, next_time, base;
    int period2;

    d = muldiv64(current_time - s->count_load_time, PIT_FREQ, ticks_per_sec);
    switch(s->mode) {
    default:
    case 0:
    case 1:
        if (d < s->count)
            next_time = s->count;
        else
            return -1;
        break;
    case 2:
        base = (d / s->count) * s->count;
        if ((d - base) == 0 && d != 0)
            next_time = base + s->count;
        else
            next_time = base + s->count + 1;
        break;
    case 3:
        base = (d / s->count) * s->count;
        period2 = ((s->count + 1) >> 1);
        if ((d - base) < period2)
            next_time = base + period2;
        else
            next_time = base + s->count;
        break;
    case 4:
    case 5:
        if (d < s->count)
            next_time = s->count;
        else if (d == s->count)
            next_time = s->count + 1;
        else
            return -1;
        break;
    }
    /* convert to timer units */
    next_time = s->count_load_time + muldiv64(next_time, ticks_per_sec, PIT_FREQ);
    /* fix potential rounding problems */
    /* XXX: better solution: use a clock at PIT_FREQ Hz */
    if (next_time <= current_time)
        next_time = current_time + 1;
    return next_time;
}

/* val must be 0 or 1 */
void pit_set_gate(PITState *pit, int channel, int val)
{
    PITChannelState *s = &pit->channels[channel];

    switch(s->mode) {
    default:
    case 0:
    case 4:
        /* XXX: just disable/enable counting */
        break;
    case 1:
    case 5:
        if (s->gate < val) {
            /* restart counting on rising edge */
            s->count_load_time = qemu_get_clock(vm_clock);
            pit_irq_timer_update(s, s->count_load_time);
        }
        break;
    case 2:
    case 3:
        if (s->gate < val) {
            /* restart counting on rising edge */
            s->count_load_time = qemu_get_clock(vm_clock);
            pit_irq_timer_update(s, s->count_load_time);
        }
        /* XXX: disable/enable counting */
        break;
    }
    s->gate = val;
}

int pit_get_gate(PITState *pit, int channel)
{
    PITChannelState *s = &pit->channels[channel];
    return s->gate;
}
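
/* Hand the programmed parameters of the HVM-accelerated channel to the
   hypervisor via the ioreq shared page, so the hypervisor can drive the
   periodic timer instead of the device model. */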
void pit_reset_hvm_vectors()
{
    extern shared_iopage_t *shared_page;
    ioreq_t *req;
    int irq, i;
    PITChannelState *s;

    irq = 0;

    for(i = 0; i < 3; i++) {
        if (pit_state.channels[i].hvm_channel)
            break;
    }

    if (i == 3)
        return;

    /* Assumes just one HVM-accelerated channel */
    hvm_channel = i;
    s = &pit_state.channels[hvm_channel];
    fprintf(logfile,
            "HVM_PIT:guest init pit channel %d!\n", hvm_channel);
    req = &shared_page->vcpu_iodata[0].vp_ioreq;

    req->state = STATE_IORESP_HOOK;
    /*
     * Info passed to the HV is packed into req->u.data as follows:
     *   initial count: 16 bits, timer vector: 8 bits,
     *   PIT channel (0-2): 2 bits, rw mode: 2 bits.
     */
    req->u.data = s->count;
    req->u.data |= (irq << 16);
    req->u.data |= (hvm_channel << 24);
    req->u.data |= ((s->rw_mode) << 26);
    fprintf(logfile, "HVM_PIT:pass info 0x%"PRIx64" to HV!\n", req->u.data);
}

static inline void pit_load_count(PITChannelState *s, int val)
{
    if (val == 0)
        val = 0x10000;
    s->count_load_time = qemu_get_clock(vm_clock);
    s->count = val;

    /* The guest has initialised this PIT channel for periodic mode.  Do not
     * update the associated timer, so the channel never raises an interrupt
     * from the device model. */
    if (hvm_channel != -1 && s->mode == 2) {
        pit_reset_hvm_vectors();
        hvm_channel = -1;
    }

    /* pit_irq_timer_update(s, s->count_load_time); */
}

/* if already latched, do not latch again */
static void pit_latch_count(PITChannelState *s)
{
    if (!s->count_latched) {
        s->latched_count = pit_get_count(s);
        s->count_latched = s->rw_mode;
    }
}
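
/* Write handler for the PIT I/O ports: offsets 0-2 are the channel data
   registers, offset 3 is the mode/command (control word) register. */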
static void pit_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    PITState *pit = opaque;
    int channel, access;
    PITChannelState *s;

    addr &= 3;
    if (addr == 3) {
        channel = val >> 6;
        if (channel == 3) {
            /* read back command */
            for(channel = 0; channel < 3; channel++) {
                s = &pit->channels[channel];
                if (val & (2 << channel)) {
                    if (!(val & 0x20)) {
                        pit_latch_count(s);
                    }
                    if (!(val & 0x10) && !s->status_latched) {
                        /* status latch */
                        /* XXX: add BCD and null count */
                        s->status = (pit_get_out1(s, qemu_get_clock(vm_clock)) << 7) |
                            (s->rw_mode << 4) |
                            (s->mode << 1) |
                            s->bcd;
                        s->status_latched = 1;
                    }
                }
            }
        } else {
            s = &pit->channels[channel];
            access = (val >> 4) & 3;
            if (access == 0) {
                pit_latch_count(s);
            } else {
                s->rw_mode = access;
                s->read_state = access;
                s->write_state = access;

                s->mode = (val >> 1) & 7;
                s->bcd = val & 1;
                /* XXX: update irq timer ? */
            }
        }
    } else {
        s = &pit->channels[addr];
        s->hvm_channel = 1;
        hvm_channel = addr;
        switch(s->write_state) {
        default:
        case RW_STATE_LSB:
            pit_load_count(s, val);
            break;
        case RW_STATE_MSB:
            pit_load_count(s, val << 8);
            break;
        case RW_STATE_WORD0:
            s->write_latch = val;
            s->write_state = RW_STATE_WORD1;
            break;
        case RW_STATE_WORD1:
            pit_load_count(s, s->write_latch | (val << 8));
            s->write_state = RW_STATE_WORD0;
            break;
        }
    }
}
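
/* Read handler: a latched status byte takes priority, then a latched count,
   otherwise the live counter value is returned according to read_state. */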
static uint32_t pit_ioport_read(void *opaque, uint32_t addr)
{
    PITState *pit = opaque;
    int ret, count;
    PITChannelState *s;

    addr &= 3;
    s = &pit->channels[addr];
    if (s->status_latched) {
        s->status_latched = 0;
        ret = s->status;
    } else if (s->count_latched) {
        switch(s->count_latched) {
        default:
        case RW_STATE_LSB:
            ret = s->latched_count & 0xff;
            s->count_latched = 0;
            break;
        case RW_STATE_MSB:
            ret = s->latched_count >> 8;
            s->count_latched = 0;
            break;
        case RW_STATE_WORD0:
            ret = s->latched_count & 0xff;
            s->count_latched = RW_STATE_MSB;
            break;
        }
    } else {
        switch(s->read_state) {
        default:
        case RW_STATE_LSB:
            count = pit_get_count(s);
            ret = count & 0xff;
            break;
        case RW_STATE_MSB:
            count = pit_get_count(s);
            ret = (count >> 8) & 0xff;
            break;
        case RW_STATE_WORD0:
            count = pit_get_count(s);
            ret = count & 0xff;
            s->read_state = RW_STATE_WORD1;
            break;
        case RW_STATE_WORD1:
            count = pit_get_count(s);
            ret = (count >> 8) & 0xff;
            s->read_state = RW_STATE_WORD0;
            break;
        }
    }
    return ret;
}

static void pit_irq_timer_update(PITChannelState *s, int64_t current_time)
{
    int64_t expire_time;
    int irq_level;

    if (!s->irq_timer)
        return;
    expire_time = pit_get_next_transition_time(s, current_time);
    irq_level = pit_get_out1(s, current_time);
    pic_set_irq(s->irq, irq_level);
#ifdef DEBUG_PIT
    printf("irq_level=%d next_delay=%f\n",
           irq_level,
           (double)(expire_time - current_time) / ticks_per_sec);
#endif
    s->next_transition_time = expire_time;
    if (expire_time != -1)
        qemu_mod_timer(s->irq_timer, expire_time);
    else
        qemu_del_timer(s->irq_timer);
}

static void pit_irq_timer(void *opaque)
{
    PITChannelState *s = opaque;

    pit_irq_timer_update(s, s->next_transition_time);
}
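
/* savevm/loadvm serialisation of the three channel states (version 1). */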
static void pit_save(QEMUFile *f, void *opaque)
{
    PITState *pit = opaque;
    PITChannelState *s;
    int i;

    for(i = 0; i < 3; i++) {
        s = &pit->channels[i];
        qemu_put_be32s(f, &s->count);
        qemu_put_be16s(f, &s->latched_count);
        qemu_put_8s(f, &s->count_latched);
        qemu_put_8s(f, &s->status_latched);
        qemu_put_8s(f, &s->status);
        qemu_put_8s(f, &s->read_state);
        qemu_put_8s(f, &s->write_state);
        qemu_put_8s(f, &s->write_latch);
        qemu_put_8s(f, &s->rw_mode);
        qemu_put_8s(f, &s->mode);
        qemu_put_8s(f, &s->bcd);
        qemu_put_8s(f, &s->gate);
        qemu_put_be64s(f, &s->count_load_time);
        if (s->irq_timer) {
            qemu_put_be64s(f, &s->next_transition_time);
            qemu_put_timer(f, s->irq_timer);
        }
    }
}

static int pit_load(QEMUFile *f, void *opaque, int version_id)
{
    PITState *pit = opaque;
    PITChannelState *s;
    int i;

    if (version_id != 1)
        return -EINVAL;

    for(i = 0; i < 3; i++) {
        s = &pit->channels[i];
        qemu_get_be32s(f, &s->count);
        qemu_get_be16s(f, &s->latched_count);
        qemu_get_8s(f, &s->count_latched);
        qemu_get_8s(f, &s->status_latched);
        qemu_get_8s(f, &s->status);
        qemu_get_8s(f, &s->read_state);
        qemu_get_8s(f, &s->write_state);
        qemu_get_8s(f, &s->write_latch);
        qemu_get_8s(f, &s->rw_mode);
        qemu_get_8s(f, &s->mode);
        qemu_get_8s(f, &s->bcd);
        qemu_get_8s(f, &s->gate);
        qemu_get_be64s(f, &s->count_load_time);
        if (s->irq_timer) {
            qemu_get_be64s(f, &s->next_transition_time);
            qemu_get_timer(f, s->irq_timer);
        }
    }
    return 0;
}

static void pit_reset(void *opaque)
{
    PITState *pit = opaque;
    PITChannelState *s;
    int i;

    for(i = 0; i < 3; i++) {
        s = &pit->channels[i];
        s->mode = 3;
        s->gate = (i != 2);
        pit_load_count(s, 0);
    }
}

PITState *pit_init(int base, int irq)
{
    PITState *pit = &pit_state;
    PITChannelState *s;

    s = &pit->channels[0];
    /* the timer 0 is connected to an IRQ */
    s->irq_timer = qemu_new_timer(vm_clock, pit_irq_timer, s);
    s->irq = irq;

    register_savevm("i8254", base, 1, pit_save, pit_load, pit);

    qemu_register_reset(pit_reset, pit);
    register_ioport_write(base, 4, 1, pit_ioport_write, pit);
    register_ioport_read(base, 3, 1, pit_ioport_read, pit);

    pit_reset(pit);

    return pit;
}
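
For reference, a minimal usage sketch of the public entry points above; the base
address 0x40 and IRQ 0 are the conventional PC values, assumed here rather than
taken from this file:

    /* Sketch: conventional PC wiring, PIT at I/O ports 0x40-0x43 (assumed values). */
    PITState *pit = pit_init(0x40, 0);
    /* Other device code may then query a channel, e.g. the speaker on channel 2: */
    int out = pit_get_out(pit, 2, qemu_get_clock(vm_clock));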