ia64/xen-unstable: view of tools/ioemu/hw/dma.c @ 6946:e703abaf6e3d

changeset: Add behaviour to the remove methods to remove the transaction's path itself. This allows us to write Remove(path) to remove the specified path, rather than having to slice the path ourselves.
author:    emellor@ewan
date:      Sun Sep 18 14:42:13 2005 +0100
parents:   8e5fc5fe636c
children:  f7b43e5c42b9
/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "vl.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define lwarn(...)
#define linfo(...)
#define ldebug(...)
#endif

#define LENOFA(a) ((int) (sizeof(a)/sizeof(a[0])))
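
/* Per-channel state: the current transfer position (now[ADDR] and
   now[COUNT]), the programmed base address and count, the mode byte,
   the page registers that supply the upper address bits, and the
   device callback that actually moves the data. */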
struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

#define ADDR 0
#define COUNT 1
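
/* Per-controller state.  A PC has two cascaded i8237s: the first
   serves the 8-bit channels 0-3, the second the 16-bit channels 4-7.
   dshift is 0 for the 8-bit controller and 1 for the 16-bit one, and
   scales port offsets, addresses and counts. */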
static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
} dma_controllers[2];

enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME = 0x08,
    CMD_CYCLIC_PRIORITY = 0x10,
    CMD_EXTENDED_WRITE = 0x20,
    CMD_LOW_DREQ = 0x40,
    CMD_LOW_DACK = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
        | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
        | CMD_LOW_DREQ | CMD_LOW_DACK
};
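
/* Page-register port to channel map, indexed by the low three bits of
   the port address: 0x81 -> channel 2, 0x82 -> channel 3,
   0x83 -> channel 1, 0x87 -> channel 0.  -1 marks ports that do not
   correspond to any channel. */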
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}

static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}

static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}
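
/* The 16-bit address and count registers are accessed one byte at a
   time through 8-bit ports; an internal flip-flop selects which byte.
   getff returns the current state and toggles it, so consecutive
   accesses alternate between low and high byte. */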
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}

static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}
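
/* Writes to a channel port assemble the 16-bit base register from two
   successive byte writes, low byte first.  Completing the high byte
   reloads the channel's current position via init_chan. */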
static void write_chan (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}
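
/* Controller register ports, at offsets 0x08-0x0f from the base
   (scaled by dshift): 0x08 command, 0x09 request, 0x0a single mask
   bit, 0x0b mode, 0x0c clear flip-flop, 0x0d master reset, 0x0e clear
   all mask bits, 0x0f write all mask bits. */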
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08: /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        break;

    case 0x0a: /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        break;

    case 0x0b: /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c: /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d: /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e: /* clear mask for all channels */
        d->mask = 0;
        break;

    case 0x0f: /* write mask for all channels */
        d->mask = data;
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
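
/* A status-port read returns the terminal-count flags in the low
   nibble and the DREQ flags in the high nibble; the terminal-count
   flags are cleared by the read, as on real hardware. */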
static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08: /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f: /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}

int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
}
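
/* Run one transfer on a channel: call the registered device handler
   with the current position and the programmed transfer size
   ((base count + 1) bytes, doubled on the 16-bit controller), then
   record how far the device got. */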
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}
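
/* Service every channel whose DREQ status bit is set and whose mask
   bit is clear, on both controllers. */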
void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4))))
                channel_run (icont, ichan);
        }
    }
}

void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}
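
/* Copy bytes from guest physical memory into a device buffer.  The
   address combines the high page register (bits 24-30), the page
   register (bits 16-23) and the channel's current address.  Mode bit
   0x20 selects address-decrement mode: the block is read from
   descending addresses, so the buffer is byte-reversed for the
   caller. */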
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_ulong addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        /* swap bytes in place so the caller sees them in transfer order */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}
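
/* Copy bytes from a device buffer into guest physical memory; the
   address computation mirrors DMA_read_memory.  In address-decrement
   mode the buffer is reversed before it is written, so the bytes land
   in descending address order. */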
int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_ulong addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        /* What about 16bit transfers? */
        /* swap bytes in place before writing, so the write itself
           reflects the descending address order */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
        cpu_physical_memory_write (addr - pos - len, buf, len);
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
}

static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}

/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
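/* Registers the controller's I/O ports: eight channel address/count
   ports at base, eight command/status ports at base + 8 (both scaled
   by dshift), and the page registers (plus optional high page
   registers) at their own bases. */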
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base)
{
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < LENOFA (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }
    qemu_register_reset(dma_reset, d);
    dma_reset(d);
}
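
/* Savevm glue: serialize each controller's registers.  The status
   byte is not saved (the line is commented out in both save and
   load), so pending DREQs do not survive a save/load cycle. */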
static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32s (f, &d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32s (f, &r->now[0]);
        qemu_put_be32s (f, &r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}
static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = opaque;
    int i;

    if (version_id != 1)
        return -EINVAL;

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    qemu_get_be32s (f, &d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_get_be32s (f, &r->now[0]);
        qemu_get_be32s (f, &r->now[1]);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }

    return 0;
}
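
/* Instantiate the two cascaded controllers at their ISA addresses:
   the 8-bit one at ports 0x00-0x0f with page registers at 0x80, the
   16-bit one at 0xc0 with page registers at 0x88.  The optional high
   page registers at 0x480/0x488 extend DMA addressing above 16MB. */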
void DMA_init (int high_page_enable)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);
    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
}