ia64/xen-unstable

changeset 7541:602f7fc3e1b1

Originally in qemu, when an IDE DMA transfer is started (triggered by
access to the 0xc000 port in VMX), qemu will block there until the
transfer finishes. During that block there are extra domain switches
between dom0 and the idle domain, and VMX can't be resumed. By making
the real IDE DMA transfer run in another thread, qemu returns and VMX
resumes on time, and fewer cycles are wasted.

Signed-off-by: Ke Yu <ke.yu@intel.com>
Signed-off-by: Xiaowei Yang <xiaowei.yang@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Oct 27 17:29:39 2005 +0100 (2005-10-27)
parents d6ebcfc5a30b
children f8087c9297aa
files tools/ioemu/hw/ide.c
line diff
     1.1 --- a/tools/ioemu/hw/ide.c	Thu Oct 27 17:27:04 2005 +0100
     1.2 +++ b/tools/ioemu/hw/ide.c	Thu Oct 27 17:29:39 2005 +0100
     1.3 @@ -22,6 +22,7 @@
     1.4   * THE SOFTWARE.
     1.5   */
     1.6  #include "vl.h"
     1.7 +#include <pthread.h>
     1.8  
     1.9  /* debug IDE devices */
    1.10  //#define DEBUG_IDE
    1.11 @@ -360,6 +361,48 @@ typedef struct PCIIDEState {
    1.12      BMDMAState bmdma[2];
    1.13  } PCIIDEState;
    1.14  
    1.15 +#define DMA_MULTI_THREAD
    1.16 +
    1.17 +#ifdef DMA_MULTI_THREAD
    1.18 +
    1.19 +static int file_pipes[2];
    1.20 +
    1.21 +static void ide_dma_loop(BMDMAState *bm);
    1.22 +static void dma_thread_loop(BMDMAState *bm);
    1.23 +
    1.24 +static void *dma_thread_func(void* opaque)
    1.25 +{
    1.26 +    BMDMAState* req;
    1.27 +
    1.28 +    while (read(file_pipes[0], &req, sizeof(req))) {
    1.29 +        dma_thread_loop(req);
    1.30 +    }
    1.31 +
    1.32 +    return NULL;
    1.33 +}
    1.34 +
    1.35 +static void dma_create_thread()
    1.36 +{
    1.37 +    pthread_t tid;
    1.38 +    int rt;
    1.39 +
    1.40 +    if (pipe(file_pipes) != 0){
    1.41 +        fprintf(stderr, "create pipe failed\n");
    1.42 +        exit(1);
    1.43 +    }
    1.44 +
    1.45 +    if ( (rt = pthread_create(&tid, NULL, dma_thread_func, NULL)) ) {
    1.46 +        fprintf(stderr, "Oops, dma thread creation failed, errno=%d\n", rt);
    1.47 +        exit(1);
    1.48 +    }
    1.49 +
    1.50 +    if ( (rt = pthread_detach(tid)) ) {
    1.51 +        fprintf(stderr, "Oops, dma thread detachment failed, errno=%d\n", rt);
    1.52 +        exit(1);
    1.53 +    }
    1.54 +}
    1.55 +#endif //DMA_MULTI_THREAD
    1.56 +
    1.57  static void ide_dma_start(IDEState *s, IDEDMAFunc *dma_cb);
    1.58  
    1.59  static void padstr(char *str, const char *src, int len)
    1.60 @@ -1978,8 +2021,16 @@ static void ide_map(PCIDevice *pci_dev, 
    1.61  
    1.62  /* XXX: full callback usage to prepare non blocking I/Os support -
    1.63     error handling */
    1.64 +#ifdef DMA_MULTI_THREAD
    1.65  static void ide_dma_loop(BMDMAState *bm)
    1.66  {
    1.67 +    write(file_pipes[1], &bm, sizeof(bm));
    1.68 +}
    1.69 +static void dma_thread_loop(BMDMAState *bm)
    1.70 +#else 
    1.71 +static void ide_dma_loop(BMDMAState *bm)
    1.72 +#endif //DMA_MULTI_THREAD
    1.73 +{
    1.74      struct {
    1.75          uint32_t addr;
    1.76          uint32_t size;
    1.77 @@ -2166,6 +2217,9 @@ void pci_ide_init(PCIBus *bus, BlockDriv
    1.78          d->ide_if[i].pci_dev = (PCIDevice *)d;
    1.79      ide_init2(&d->ide_if[0], 16, hd_table[0], hd_table[1]);
    1.80      ide_init2(&d->ide_if[2], 16, hd_table[2], hd_table[3]);
    1.81 +#ifdef DMA_MULTI_THREAD    
    1.82 +    dma_create_thread();
    1.83 +#endif //DMA_MULTI_THREAD    
    1.84  }
    1.85  
    1.86  /* hd_table must contain 4 block drivers */
    1.87 @@ -2196,6 +2250,9 @@ void pci_piix3_ide_init(PCIBus *bus, Blo
    1.88      ide_init2(&d->ide_if[2], 15, hd_table[2], hd_table[3]);
    1.89      ide_init_ioport(&d->ide_if[0], 0x1f0, 0x3f6);
    1.90      ide_init_ioport(&d->ide_if[2], 0x170, 0x376);
    1.91 +#ifdef DMA_MULTI_THREAD    
    1.92 +    dma_create_thread();
    1.93 +#endif //DMA_MULTI_THREAD    
    1.94  }
    1.95  
    1.96  /***********************************************************/