ia64/xen-unstable

changeset 12000:1e6b0a8a0822

[IA64][HVM] Add a buffered I/O mechanism for Xen/VTi domains. The
current implementation accelerates a Windows guest's dense I/O
operations at boot time.

Signed-off-by: Zhang xiantao <xiantao.zhang@intel.com>
author kfraser@localhost.localdomain
date Wed Oct 25 15:29:08 2006 +0100 (2006-10-25)
parents 21905d2497d6
children abee5c6b930d
files tools/libxc/ia64/xc_ia64_hvm_build.c tools/python/xen/xend/image.py xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/vmx_init.c xen/include/asm-ia64/vmx_platform.h xen/include/asm-ia64/vmx_vcpu.h xen/include/public/arch-ia64.h
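
For reference, the patch queues guest write requests into a shared ring
page instead of waking the device model once per request. A minimal
sketch of the ring layout it relies on is below; the authoritative
definition is the x86-shared buffered_iopage_t in
xen/include/public/hvm/ioreq.h, so the field order and the slot count
of 80 shown here are assumptions, not part of this changeset.

    /* Sketch of the buffered-I/O ring page (fills one PAGE_SIZE page). */
    #define IOREQ_BUFFER_SLOT_NUM 80              /* assumed slot count */

    typedef struct buffered_iopage {
        unsigned long read_pointer;     /* advanced by the device model  */
        unsigned long write_pointer;    /* advanced by Xen, the producer */
        ioreq_t       ioreq[IOREQ_BUFFER_SLOT_NUM];
    } buffered_iopage_t;

Both pointers only ever increase; the unsigned difference
write_pointer - read_pointer gives the number of occupied slots even
after either counter wraps.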
line diff
     1.1 --- a/tools/libxc/ia64/xc_ia64_hvm_build.c	Wed Oct 25 15:25:13 2006 +0100
     1.2 +++ b/tools/libxc/ia64/xc_ia64_hvm_build.c	Wed Oct 25 15:29:08 2006 +0100
     1.3 @@ -551,8 +551,9 @@ setup_guest(int xc_handle, uint32_t dom,
     1.4              char *image, unsigned long image_size, uint32_t vcpus,
     1.5              unsigned int store_evtchn, unsigned long *store_mfn)
     1.6  {
     1.7 -    unsigned long page_array[2];
     1.8 +    unsigned long page_array[3];
     1.9      shared_iopage_t *sp;
    1.10 +    void *ioreq_buffer_page;
    1.11      unsigned long dom_memsize = (memsize << 20);
    1.12      DECLARE_DOMCTL;
    1.13  
    1.14 @@ -587,7 +588,7 @@ setup_guest(int xc_handle, uint32_t dom,
    1.15  
    1.16      /* Retrieve special pages like io, xenstore, etc. */
    1.17      if (xc_ia64_get_pfn_list(xc_handle, dom, page_array,
    1.18 -                             IO_PAGE_START>>PAGE_SHIFT, 2) != 2) {
    1.19 +                             IO_PAGE_START>>PAGE_SHIFT, 3) != 3) {
    1.20          PERROR("Could not get the page frame list");
    1.21          goto error_out;
    1.22      }
    1.23 @@ -604,7 +605,10 @@ setup_guest(int xc_handle, uint32_t dom,
    1.24  
    1.25      memset(sp, 0, PAGE_SIZE);
    1.26      munmap(sp, PAGE_SIZE);
    1.27 -
     1.28 +    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom,
     1.29 +                               PAGE_SIZE, PROT_READ|PROT_WRITE, page_array[2]);
     1.30 +    memset(ioreq_buffer_page, 0, PAGE_SIZE);
     1.31 +    munmap(ioreq_buffer_page, PAGE_SIZE);
    1.32      return 0;
    1.33  
    1.34  error_out:
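
The mapping added above assumes xc_map_foreign_range() succeeds. A more
defensive variant of the same tool-side step, with error handling added
(a sketch, not part of the changeset), might look like:

    ioreq_buffer_page = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                             PROT_READ | PROT_WRITE,
                                             page_array[2]);
    if (ioreq_buffer_page == NULL) {
        PERROR("Could not map the buffered-I/O page");
        goto error_out;
    }
    memset(ioreq_buffer_page, 0, PAGE_SIZE);
    munmap(ioreq_buffer_page, PAGE_SIZE);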
     2.1 --- a/tools/python/xen/xend/image.py	Wed Oct 25 15:25:13 2006 +0100
     2.2 +++ b/tools/python/xen/xend/image.py	Wed Oct 25 15:29:08 2006 +0100
     2.3 @@ -471,7 +471,7 @@ class IA64_HVM_ImageHandler(HVMImageHand
     2.4      def getRequiredAvailableMemory(self, mem_kb):
     2.5          page_kb = 16
     2.6          # ROM size for guest firmware, ioreq page and xenstore page
     2.7 -        extra_pages = 1024 + 2
     2.8 +        extra_pages = 1024 + 3
     2.9          return mem_kb + extra_pages * page_kb
    2.10  
    2.11      def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
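
With 16 KB pages on ia64, this reserves (1024 + 3) * 16 KB = 16432 KB
on top of guest memory: 16 MB of guest-firmware ROM plus three special
pages of 16 KB each (ioreq, xenstore, and the new buffered-I/O page).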
     3.1 --- a/xen/arch/ia64/vmx/mmio.c	Wed Oct 25 15:25:13 2006 +0100
     3.2 +++ b/xen/arch/ia64/vmx/mmio.c	Wed Oct 25 15:29:08 2006 +0100
     3.3 @@ -52,6 +52,70 @@ struct mmio_list *lookup_mmio(u64 gpa, s
     3.4  #define PIB_OFST_INTA           0x1E0000
     3.5  #define PIB_OFST_XTP            0x1E0008
     3.6  
     3.7 +#define HVM_BUFFERED_IO_RANGE_NR 1
     3.8 +
     3.9 +struct hvm_buffered_io_range {
    3.10 +    unsigned long start_addr;
    3.11 +    unsigned long length;
    3.12 +};
    3.13 +
    3.14 +static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
    3.15 +static struct hvm_buffered_io_range
    3.16 +*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
    3.17 +{
    3.18 +    &buffered_stdvga_range
    3.19 +};
    3.20 +
    3.21 +int hvm_buffered_io_intercept(ioreq_t *p)
    3.22 +{
    3.23 +    struct vcpu *v = current;
    3.24 +    spinlock_t  *buffered_io_lock;
    3.25 +    buffered_iopage_t *buffered_iopage =
    3.26 +        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
    3.27 +    unsigned long tmp_write_pointer = 0;
    3.28 +    int i;
    3.29 +
     3.30 +    /* Ignore read requests: a read needs its result synchronously. */
    3.31 +    if ( p->dir == IOREQ_READ )
    3.32 +        return 0;
    3.33 +
    3.34 +    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
    3.35 +        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
    3.36 +             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
    3.37 +                                     hvm_buffered_io_ranges[i]->length )
    3.38 +            break;
    3.39 +    }
    3.40 +
    3.41 +    if ( i == HVM_BUFFERED_IO_RANGE_NR )
    3.42 +        return 0;
    3.43 +
    3.44 +    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
    3.45 +    spin_lock(buffered_io_lock);
    3.46 +
    3.47 +    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
    3.48 +         (unsigned long)IOREQ_BUFFER_SLOT_NUM ) {
     3.49 +        /* The queue is full: send this ioreq through the normal,
     3.50 +         * synchronous path instead.
     3.51 +         * NOTE: the unsigned arithmetic above handles the case where
     3.52 +         * write_pointer has overflowed.
     3.53 +         */
    3.54 +        spin_unlock(buffered_io_lock);
    3.55 +        return 0;
    3.56 +    }
    3.57 +
    3.58 +    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
    3.59 +
    3.60 +    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
    3.61 +
     3.62 +    /* Make the ioreq_t visible before updating write_pointer. */
    3.63 +    wmb();
    3.64 +    buffered_iopage->write_pointer++;
    3.65 +
    3.66 +    spin_unlock(buffered_io_lock);
    3.67 +
    3.68 +    return 1;
    3.69 +}
    3.70 +
    3.71  static void write_ipi (VCPU *vcpu, uint64_t addr, uint64_t value);
    3.72  
    3.73  static void pib_write(VCPU *vcpu, void *src, uint64_t pib_off, size_t s, int ma)
    3.74 @@ -156,7 +220,11 @@ static void low_mmio_access(VCPU *vcpu, 
    3.75      p->df = 0;
    3.76  
    3.77      p->io_count++;
    3.78 -
     3.79 +    if (hvm_buffered_io_intercept(p)) {
     3.80 +        p->state = STATE_IORESP_READY;
     3.81 +        vmx_io_assist(v);
     3.82 +        return;
     3.83 +    }
    3.84      vmx_send_assist_req(v);
    3.85      if(dir==IOREQ_READ){ //read
    3.86          *val=p->u.data;
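
hvm_buffered_io_intercept() is only the producer half of the ring; the
consumer runs in the device model and is not part of this patch. A
minimal sketch of what a drain loop has to do, assuming the
buffered_iopage_t layout sketched above and a hypothetical
handle_ioreq() dispatch helper, is:

    static void drain_buffered_io(buffered_iopage_t *pg)
    {
        while (pg->read_pointer != pg->write_pointer) {
            ioreq_t *req = &pg->ioreq[pg->read_pointer % IOREQ_BUFFER_SLOT_NUM];

            handle_ioreq(req);  /* hypothetical: replay the write to the device */

            /* Finish consuming the slot before handing it back to Xen. */
            mb();
            pg->read_pointer++;
        }
    }

Because the producer refuses to enqueue once write_pointer -
read_pointer reaches IOREQ_BUFFER_SLOT_NUM, the consumer never sees a
slot overwritten while it is still reading it.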
     4.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Wed Oct 25 15:25:13 2006 +0100
     4.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Wed Oct 25 15:29:08 2006 +0100
     4.3 @@ -362,8 +362,8 @@ static const io_range_t io_ranges[] = {
     4.4  	{PIB_START, PIB_SIZE, GPFN_PIB},
     4.5  };
     4.6  
     4.7 -/* Reseve 1 page for shared I/O and 1 page for xenstore.  */
     4.8 -#define VMX_SYS_PAGES	(2 + (GFW_SIZE >> PAGE_SHIFT))
      4.9 +/* Reserve 1 page for shared I/O, 1 page for xenstore and 1 page for buffered I/O. */
    4.10 +#define VMX_SYS_PAGES	(3 + (GFW_SIZE >> PAGE_SHIFT))
    4.11  #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
    4.12  
    4.13  static void vmx_build_physmap_table(struct domain *d)
    4.14 @@ -424,8 +424,12 @@ static void vmx_build_physmap_table(stru
    4.15  	mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
    4.16  	assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
    4.17  	list_ent = mfn_to_page(mfn)->list.next;
    4.18 +	ASSERT(list_ent != &d->page_list);
     4.19 +
     4.20 +	mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
     4.21 +	assign_domain_page(d, BUFFER_IO_PAGE_START, mfn << PAGE_SHIFT);
     4.22 +	list_ent = mfn_to_page(mfn)->list.next;
    4.23  	ASSERT(list_ent == &d->page_list);
    4.24 -
    4.25  }
    4.26  
    4.27  void vmx_setup_platform(struct domain *d)
    4.28 @@ -436,6 +440,10 @@ void vmx_setup_platform(struct domain *d
    4.29  
    4.30  	d->arch.vmx_platform.shared_page_va =
    4.31  		(unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
     4.32 +	/* For buffered I/O requests. */
     4.33 +	spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
     4.34 +	d->arch.hvm_domain.buffered_io_va =
     4.35 +		(unsigned long)__va(__gpa_to_mpa(d, BUFFER_IO_PAGE_START));
    4.36  	/* TEMP */
    4.37  	d->arch.vmx_platform.pib_base = 0xfee00000UL;
    4.38  
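
Two details worth noting here: buffered_io_va is a hypervisor-virtual
alias of the same machine page that vmx_build_physmap_table() just
assigned at BUFFER_IO_PAGE_START, so stores made by
hvm_buffered_io_intercept() land directly in the page the device model
maps from user space; and buffered_io_lock is per-domain because every
vcpu of the domain produces into the one shared ring, so concurrent
writers must serialise their write_pointer updates.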
     5.1 --- a/xen/include/asm-ia64/vmx_platform.h	Wed Oct 25 15:25:13 2006 +0100
     5.2 +++ b/xen/include/asm-ia64/vmx_platform.h	Wed Oct 25 15:29:08 2006 +0100
     5.3 @@ -24,6 +24,8 @@
     5.4  #include <asm/hvm/vioapic.h>
     5.5  struct mmio_list;
     5.6  typedef struct virtual_platform_def {
     5.7 +    unsigned long          buffered_io_va;
     5.8 +    spinlock_t             buffered_io_lock;
     5.9      unsigned long       shared_page_va;
    5.10      unsigned long       pib_base;
    5.11      unsigned char       xtp;
     6.1 --- a/xen/include/asm-ia64/vmx_vcpu.h	Wed Oct 25 15:25:13 2006 +0100
     6.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h	Wed Oct 25 15:29:08 2006 +0100
     6.3 @@ -57,7 +57,6 @@ extern int check_indirect_reg_rsv_fields
     6.4  extern u64 set_isr_ei_ni (VCPU *vcpu);
     6.5  extern u64 set_isr_for_na_inst(VCPU *vcpu, int op);
     6.6  
     6.7 -
     6.8  /* next all for VTI domain APIs definition */
     6.9  extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value);
    6.10  extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value);
     7.1 --- a/xen/include/public/arch-ia64.h	Wed Oct 25 15:25:13 2006 +0100
     7.2 +++ b/xen/include/public/arch-ia64.h	Wed Oct 25 15:29:08 2006 +0100
     7.3 @@ -80,6 +80,9 @@ typedef unsigned long xen_ulong_t;
     7.4  #define STORE_PAGE_START (IO_PAGE_START + IO_PAGE_SIZE)
     7.5  #define STORE_PAGE_SIZE	 PAGE_SIZE
     7.6  
      7.7 +#define BUFFER_IO_PAGE_START (STORE_PAGE_START + STORE_PAGE_SIZE)
     7.8 +#define BUFFER_IO_PAGE_SIZE PAGE_SIZE
     7.9 +
    7.10  #define IO_SAPIC_START   0xfec00000UL
    7.11  #define IO_SAPIC_SIZE    0x100000
    7.12
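
The resulting special-page window in guest-physical space is three
consecutive PAGE_SIZE pages:

    IO_PAGE_START          ioreq page (synchronous I/O requests)
    STORE_PAGE_START       xenstore ring page
    BUFFER_IO_PAGE_START   buffered-I/O ring page (added by this patch)

which matches the tool side above fetching three pfns starting at
IO_PAGE_START >> PAGE_SHIFT and treating page_array[2] as the
buffered-I/O page.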