ia64/xen-unstable
changeset 860:02306208d767
bitkeeper revision 1.536 (3f9fd20aglcfc5h0kB1oldciJuy2dQ)
Many files:
Modified blkdev and network interfaces to allow expedited flushing of pending request buffers.
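The change replaces the parameterless net/block notification hypercalls with ones that take an explicit opcode (in EBX; the network call also takes a VIF index in ECX). Besides the old push-style notification, each interface gains a flush command that returns all pending request buffers to the guest. As a rough, hypothetical illustration of the intended use -- say, a guest draining its rings before suspending -- the sketch below uses only the wrappers and opcode constants introduced in the diffs that follow (flush_pending_io and nr_vifs are illustrative names, not part of this changeset):

    /* Hypothetical guest-side suspend path: drain all pending I/O. */
    static void flush_pending_io(unsigned int nr_vifs)
    {
        unsigned int i;

        /* Return all queued network buffers, one NETOP call per VIF. */
        for ( i = 0; i < nr_vifs; i++ )
            (void)HYPERVISOR_net_io_op(NETOP_FLUSH_BUFFERS, i);

        /* Likewise flush any pending block-device request buffers. */
        (void)HYPERVISOR_block_io_op(BLKOP_FLUSH_BUFFERS);
    }

The old behaviour is preserved under NETOP_PUSH_BUFFERS / BLKOP_PUSH_BUFFERS.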
--- a/extras/mini-os/h/hypervisor.h	Tue Oct 28 20:59:42 2003 +0000
+++ b/extras/mini-os/h/hypervisor.h	Wed Oct 29 14:43:22 2003 +0000
@@ -103,12 +103,13 @@ static inline int HYPERVISOR_set_callbac
     return ret;
 }
 
-static inline int HYPERVISOR_net_update(void)
+static inline int HYPERVISOR_net_io_op(unsigned int op, unsigned int idx)
 {
     int ret;
     __asm__ __volatile__ (
         TRAP_INSTR
-        : "=a" (ret) : "0" (__HYPERVISOR_net_update) );
+        : "=a" (ret) : "0" (__HYPERVISOR_net_io_op),
+          "b" (op), "c" (idx) );
 
     return ret;
 }
@@ -165,12 +166,13 @@ static inline int HYPERVISOR_network_op(
     return ret;
 }
 
-static inline int HYPERVISOR_block_io_op(void)
+static inline int HYPERVISOR_block_io_op(unsigned int op)
 {
     int ret;
     __asm__ __volatile__ (
         TRAP_INSTR
-        : "=a" (ret) : "0" (__HYPERVISOR_block_io_op) );
+        : "=a" (ret) : "0" (__HYPERVISOR_block_io_op),
+          "b" (op) );
 
     return ret;
 }
--- a/extras/mini-os/head.S	Tue Oct 28 20:59:42 2003 +0000
+++ b/extras/mini-os/head.S	Wed Oct 29 14:43:22 2003 +0000
@@ -1,9 +1,8 @@
 #include <os.h>
 
 /* Offsets in start_info structure */
-#define SHARED_INFO  4
-#define MOD_START   12
-#define MOD_LEN     16
+#define MOD_START    4
+#define MOD_LEN      8
 
 #define ENTRY(X) .globl X ; X :
 
@@ -31,10 +30,10 @@ 1:      sub $4,%eax
 
         /* Clear BSS first so that there are no surprises... */
 2:      xorl %eax,%eax
-	movl $__bss_start,%edi
-	movl $_end,%ecx
-	subl %edi,%ecx
-	rep stosb
+        movl $__bss_start,%edi
+        movl $_end,%ecx
+        subl %edi,%ecx
+        rep stosb
 
         push %esi
         call start_kernel
--- a/tools/internal/xi_build.c	Tue Oct 28 20:59:42 2003 +0000
+++ b/tools/internal/xi_build.c	Wed Oct 29 14:43:22 2003 +0000
@@ -230,12 +230,8 @@ static int setup_guestos(
 
     alloc_index = tot_pages - 1;
 
-    /*
-     * Count bottom-level PTs, rounding up. Include one PTE for shared info. We
-     * therefore add 1024 because 1 is for shared_info, 1023 is to round up.
-     */
-    num_pt_pages = 
-        (l1_table_offset(virt_load_addr) + tot_pages + 1024) / 1024;
+    /* Count bottom-level PTs, rounding up. */
+    num_pt_pages = (l1_table_offset(virt_load_addr) + tot_pages + 1023) / 1024;
 
     /* We must also count the page directory. */
     num_pt_pages++;
@@ -250,7 +246,6 @@ static int setup_guestos(
     l2tab = page_array[alloc_index] << PAGE_SHIFT;
     alloc_index--;
     meminfo->l2_pgt_addr = l2tab;
-    meminfo->virt_shinfo_addr = virt_load_addr + (tot_pages << PAGE_SHIFT);
 
     /*
      * Pin down l2tab addr as page dir page - causes hypervisor to provide
@@ -261,16 +256,12 @@ static int setup_guestos(
     pgt_updates++;
     num_pgt_updates++;
 
-    /*
-     * Initialise the page tables. The final iteration is for the shared_info
-     * PTE -- we break out before filling in the entry, as that is done by
-     * Xen during final setup.
-     */
+    /* Initialise the page tables. */
     if ( (vl2tab = map_pfn(l2tab >> PAGE_SHIFT)) == NULL )
         goto error_out;
     memset(vl2tab, 0, PAGE_SIZE);
     vl2e = vl2tab + l2_table_offset(virt_load_addr);
-    for ( count = 0; count < (tot_pages + 1); count++ )
+    for ( count = 0; count < tot_pages; count++ )
     {
         if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
         {
@@ -291,9 +282,6 @@ static int setup_guestos(
             vl2e++;
         }
 
-        /* The last PTE we consider is filled in later by Xen. */
-        if ( count == tot_pages ) break;
-
         if ( count < pt_start )
         {
             pgt_updates->ptr = (unsigned long)vl1e;
--- a/xen/arch/i386/entry.S	Tue Oct 28 20:59:42 2003 +0000
+++ b/xen/arch/i386/entry.S	Wed Oct 29 14:43:22 2003 +0000
@@ -710,7 +710,7 @@ ENTRY(hypervisor_call_table)
         .long SYMBOL_NAME(do_set_gdt)
         .long SYMBOL_NAME(do_stack_switch)
         .long SYMBOL_NAME(do_set_callbacks)
-        .long SYMBOL_NAME(do_net_update)
+        .long SYMBOL_NAME(do_net_io_op)
         .long SYMBOL_NAME(do_fpu_taskswitch)
         .long SYMBOL_NAME(do_yield)
         .long SYMBOL_NAME(kill_domain)
--- a/xen/common/domain.c	Tue Oct 28 20:59:42 2003 +0000
+++ b/xen/common/domain.c	Wed Oct 29 14:43:22 2003 +0000
@@ -347,7 +347,6 @@ void release_task(struct task_struct *p)
 int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
 {
     l2_pgentry_t * l2tab;
-    l1_pgentry_t * l1tab;
     start_info_t * virt_startinfo_addr;
     unsigned long virt_stack_addr;
     unsigned long phys_l2tab;
@@ -374,19 +373,9 @@ int final_setup_guestos(struct task_stru
     p->mm.pagetable = mk_pagetable(phys_l2tab);
     unmap_domain_mem(l2tab);
 
-    /* map in the shared info structure */
-    phys_l2tab = pagetable_val(p->mm.pagetable);
-    l2tab = map_domain_mem(phys_l2tab);
-    l2tab += l2_table_offset(meminfo->virt_shinfo_addr);
-    l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
-    l1tab += l1_table_offset(meminfo->virt_shinfo_addr);
-    *l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
-    unmap_domain_mem((void *)((unsigned long)l2tab & PAGE_MASK));
-    unmap_domain_mem((void *)((unsigned long)l1tab & PAGE_MASK));
-
     /* set up the shared info structure */
     update_dom_time(p->shared_info);
-	p->shared_info->domain_time = 0;
+    p->shared_info->domain_time = 0;
 
     /* we pass start info struct to guest os as function parameter on stack */
     virt_startinfo_addr = (start_info_t *)meminfo->virt_startinfo_addr;
@@ -401,7 +390,7 @@ int final_setup_guestos(struct task_stru
 
     memset(virt_startinfo_addr, 0, sizeof(*virt_startinfo_addr));
     virt_startinfo_addr->nr_pages = p->tot_pages;
-    virt_startinfo_addr->shared_info = (shared_info_t *)meminfo->virt_shinfo_addr;
+    virt_startinfo_addr->shared_info = virt_to_phys(p->shared_info);
     virt_startinfo_addr->pt_base = meminfo->virt_load_addr +
         ((p->tot_pages - 1) << PAGE_SHIFT);
 
@@ -474,7 +463,7 @@ int setup_guestos(struct task_struct *p,
     int i, dom = p->domain;
     unsigned long phys_l1tab, phys_l2tab;
     unsigned long cur_address, alloc_address;
-    unsigned long virt_load_address, virt_stack_address, virt_shinfo_address;
+    unsigned long virt_load_address, virt_stack_address;
     start_info_t *virt_startinfo_address;
     unsigned long count;
     unsigned long alloc_index;
@@ -551,16 +540,11 @@ int setup_guestos(struct task_struct *p,
     memset(l2tab, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
     p->mm.pagetable = mk_pagetable(phys_l2tab);
 
-    /*
-     * NB. The upper limit on this loop does one extra page. This is to make
-     * sure a pte exists when we want to map the shared_info struct.
-     */
-
     l2tab += l2_table_offset(virt_load_address);
     cur_address = list_entry(p->pg_head.next, struct pfn_info, list) -
         frame_table;
     cur_address <<= PAGE_SHIFT;
-    for ( count = 0; count < p->tot_pages + 1; count++ )
+    for ( count = 0; count < p->tot_pages; count++ )
     {
         if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
         {
@@ -574,14 +558,11 @@ int setup_guestos(struct task_struct *p,
         }
         *l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
 
-        if ( count < p->tot_pages )
-        {
-            page = frame_table + (cur_address >> PAGE_SHIFT);
-            page->flags = dom | PGT_writeable_page | PG_need_flush;
-            page->type_count = page->tot_count = 1;
-            /* Set up the MPT entry. */
-            machine_to_phys_mapping[cur_address >> PAGE_SHIFT] = count;
-        }
+        page = frame_table + (cur_address >> PAGE_SHIFT);
+        page->flags = dom | PGT_writeable_page | PG_need_flush;
+        page->type_count = page->tot_count = 1;
+        /* Set up the MPT entry. */
+        machine_to_phys_mapping[cur_address >> PAGE_SHIFT] = count;
 
         list_ent = frame_table[cur_address >> PAGE_SHIFT].list.next;
         cur_address = list_entry(list_ent, struct pfn_info, list) -
@@ -630,17 +611,9 @@ int setup_guestos(struct task_struct *p,
     page->flags = dom | PGT_l2_page_table;
     unmap_domain_mem(l1start);
 
-    /* Map in the the shared info structure. */
-    virt_shinfo_address = virt_load_address + (p->tot_pages << PAGE_SHIFT);
-    l2tab = l2start + l2_table_offset(virt_shinfo_address);
-    l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
-    l1tab += l1_table_offset(virt_shinfo_address);
-    *l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);
-    unmap_domain_mem(l1start);
-
     /* Set up shared info area. */
     update_dom_time(p->shared_info);
-	p->shared_info->domain_time = 0;
+    p->shared_info->domain_time = 0;
 
     virt_startinfo_address = (start_info_t *)
         (virt_load_address + ((alloc_index - 1) << PAGE_SHIFT));
@@ -671,8 +644,7 @@ int setup_guestos(struct task_struct *p,
     /* Set up start info area. */
     memset(virt_startinfo_address, 0, sizeof(*virt_startinfo_address));
     virt_startinfo_address->nr_pages = p->tot_pages;
-    virt_startinfo_address->shared_info =
-        (shared_info_t *)virt_shinfo_address;
+    virt_startinfo_address->shared_info = virt_to_phys(p->shared_info);
     virt_startinfo_address->pt_base = virt_load_address +
         ((p->tot_pages - 1) << PAGE_SHIFT);
 
--- a/xen/common/network.c	Tue Oct 28 20:59:42 2003 +0000
+++ b/xen/common/network.c	Wed Oct 29 14:43:22 2003 +0000
@@ -159,40 +159,10 @@ net_vif_t *create_net_vif(int domain)
 
 void destroy_net_vif(net_vif_t *vif)
 {
-    int i;
-    unsigned long *pte, flags;
-    struct pfn_info *page;
+    extern long flush_bufs_for_vif(net_vif_t *vif);
     struct task_struct *p = vif->domain;
-
-    /* Return any outstanding receive buffers to the guest OS. */
-    spin_lock_irqsave(&p->page_lock, flags);
-    for ( i = vif->rx_cons; i != vif->rx_prod; i = ((i+1) & (RX_RING_SIZE-1)) )
-    {
-        rx_shadow_entry_t *rx = vif->rx_shadow_ring + i;
-
-        /* Release the page-table page. */
-        page = frame_table + (rx->pte_ptr >> PAGE_SHIFT);
-        put_page_type(page);
-        put_page_tot(page);
-
-        /* Give the buffer page back to the domain. */
-        page = frame_table + rx->buf_pfn;
-        list_add(&page->list, &p->pg_head);
-        page->flags = vif->domain->domain;
-
-        /* Patch up the PTE if it hasn't changed under our feet. */
-        pte = map_domain_mem(rx->pte_ptr);
-        if ( !(*pte & _PAGE_PRESENT) )
-        {
-            *pte = (rx->buf_pfn<<PAGE_SHIFT) | (*pte & ~PAGE_MASK) |
-                _PAGE_RW | _PAGE_PRESENT;
-            page->flags |= PGT_writeable_page | PG_need_flush;
-            page->type_count = page->tot_count = 1;
-        }
-        unmap_domain_mem(pte);
-    }
-    spin_unlock_irqrestore(&p->page_lock, flags);
-
+    (void)flush_bufs_for_vif(vif);
+    UNSHARE_PFN(virt_to_page(vif->shared_rings));
     kmem_cache_free(net_vif_cache, vif);
     put_task_struct(p);
 }
--- a/xen/drivers/block/xen_block.c	Tue Oct 28 20:59:42 2003 +0000
+++ b/xen/drivers/block/xen_block.c	Wed Oct 29 14:43:22 2003 +0000
@@ -241,11 +241,26 @@ static void end_block_io_op(struct buffe
  * GUEST-OS SYSCALL -- Indicates there are requests outstanding.
  */
 
-long do_block_io_op(void)
+long do_block_io_op(unsigned int op)
 {
-    add_to_blkdev_list_tail(current);
-    maybe_trigger_io_schedule();
-    return 0L;
+    long ret = 0;
+
+    switch ( op )
+    {
+    case BLKOP_PUSH_BUFFERS:
+        add_to_blkdev_list_tail(current);
+        maybe_trigger_io_schedule();
+        break;
+
+    case BLKOP_FLUSH_BUFFERS:
+        break;
+
+    default:
+        ret = -EINVAL;
+        break;
+    }
+
+    return ret;
 }
 
--- a/xen/include/hypervisor-ifs/block.h	Tue Oct 28 20:59:42 2003 +0000
+++ b/xen/include/hypervisor-ifs/block.h	Wed Oct 29 14:43:22 2003 +0000
@@ -9,6 +9,14 @@
 #define __BLOCK_H__
 
 /*
+ * Command values for block_io_op()
+ */
+
+#define BLKOP_PUSH_BUFFERS  0  /* Notify Xen of new requests on the ring. */
+#define BLKOP_FLUSH_BUFFERS 1  /* Flush all pending request buffers.      */
+
+
+/*
  * Device numbers
  */
 
--- a/xen/include/hypervisor-ifs/dom0_ops.h	Tue Oct 28 20:59:42 2003 +0000
+++ b/xen/include/hypervisor-ifs/dom0_ops.h	Wed Oct 29 14:43:22 2003 +0000
@@ -57,7 +57,6 @@ typedef struct domain_launch
     unsigned int  domain;
     unsigned long l2_pgt_addr;
     unsigned long virt_load_addr;
-    unsigned long virt_shinfo_addr;
     unsigned long virt_startinfo_addr;
     unsigned int  num_vifs;
     char cmd_line[MAX_CMD_LEN];
--- a/xen/include/hypervisor-ifs/hypervisor-if.h	Tue Oct 28 20:59:42 2003 +0000
+++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Wed Oct 29 14:43:22 2003 +0000
@@ -19,9 +19,9 @@
  * NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY
  * and LAST_RESERVED_GDT_ENTRY are reserved).
  */
-#define NR_RESERVED_GDT_ENTRIES 40
-#define FIRST_RESERVED_GDT_ENTRY 256
-#define LAST_RESERVED_GDT_ENTRY \
+#define NR_RESERVED_GDT_ENTRIES    40
+#define FIRST_RESERVED_GDT_ENTRY   256
+#define LAST_RESERVED_GDT_ENTRY    \
   (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
 
 /*
@@ -29,10 +29,10 @@
  * are also present in the initial GDT, many OSes will be able to avoid
  * installing their own GDT.
  */
-#define FLAT_RING1_CS	0x0819
-#define FLAT_RING1_DS	0x0821
-#define FLAT_RING3_CS	0x082b
-#define FLAT_RING3_DS	0x0833
+#define FLAT_RING1_CS 0x0819
+#define FLAT_RING1_DS 0x0821
+#define FLAT_RING3_CS 0x082b
+#define FLAT_RING3_DS 0x0833
 
 
 /*
@@ -40,25 +40,25 @@
  */
 
 /* EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5. */
-#define __HYPERVISOR_set_trap_table 0
-#define __HYPERVISOR_mmu_update 1
-#define __HYPERVISOR_console_write 2
-#define __HYPERVISOR_set_gdt 3
+#define __HYPERVISOR_set_trap_table    0
+#define __HYPERVISOR_mmu_update        1
+#define __HYPERVISOR_console_write     2
+#define __HYPERVISOR_set_gdt           3
 #define __HYPERVISOR_stack_switch      4
 #define __HYPERVISOR_set_callbacks     5
-#define __HYPERVISOR_net_update 6
-#define __HYPERVISOR_fpu_taskswitch 7
-#define __HYPERVISOR_yield 8
-#define __HYPERVISOR_exit 9
-#define __HYPERVISOR_dom0_op 10
-#define __HYPERVISOR_network_op 11
-#define __HYPERVISOR_block_io_op 12
-#define __HYPERVISOR_set_debugreg 13
-#define __HYPERVISOR_get_debugreg 14
-#define __HYPERVISOR_update_descriptor 15
-#define __HYPERVISOR_set_fast_trap 16
-#define __HYPERVISOR_dom_mem_op 17
-#define __HYPERVISOR_multicall 18
+#define __HYPERVISOR_net_io_op         6
+#define __HYPERVISOR_fpu_taskswitch    7
+#define __HYPERVISOR_yield             8
+#define __HYPERVISOR_exit              9
+#define __HYPERVISOR_dom0_op          10
+#define __HYPERVISOR_network_op       11
+#define __HYPERVISOR_block_io_op      12
+#define __HYPERVISOR_set_debugreg     13
+#define __HYPERVISOR_get_debugreg     14
+#define __HYPERVISOR_update_descriptor 15
+#define __HYPERVISOR_set_fast_trap    16
+#define __HYPERVISOR_dom_mem_op       17
+#define __HYPERVISOR_multicall        18
 #define __HYPERVISOR_kbd_op            19
 #define __HYPERVISOR_update_va_mapping 20
 
@@ -276,19 +276,19 @@ typedef struct shared_info_st {
  * NB. We expect that this struct is smaller than a page.
  */
 typedef struct start_info_st {
-    unsigned long nr_pages;     /* total pages allocated to this domain */
-    shared_info_t *shared_info; /* VIRTUAL address of shared info struct */
-    unsigned long pt_base;      /* VIRTUAL address of page directory */
-    unsigned long mod_start;    /* VIRTUAL address of pre-loaded module */
-    unsigned long mod_len;      /* size (bytes) of pre-loaded module */
-    /* Machine address of net rings for each VIF. Will be page aligned. */
-    unsigned long net_rings[MAX_DOMAIN_VIFS];
-    unsigned char net_vmac[MAX_DOMAIN_VIFS][6];
-    /* Machine address of block-device ring. Will be page aligned. */
-    unsigned long blk_ring;
-    unsigned int  dom_id;
-    unsigned long flags;
-    unsigned char cmd_line[1];  /* variable-length */
+    /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME).     */
+    unsigned long pt_base;      /* VIRTUAL address of page directory.     */
+    unsigned long mod_start;    /* VIRTUAL address of pre-loaded module.  */
+    unsigned long mod_len;      /* Size (bytes) of pre-loaded module.     */
+    /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
+    unsigned long nr_pages;     /* total pages allocated to this domain.  */
+    unsigned long shared_info;  /* MACHINE address of shared info struct. */
+    unsigned int  dom_id;       /* Domain identifier.                     */
+    unsigned long flags;        /* SIF_xxx flags.                         */
+    unsigned long net_rings[MAX_DOMAIN_VIFS]; /* MACHINE address of ring. */
+    unsigned char net_vmac[MAX_DOMAIN_VIFS][6]; /* MAC address of VIF.    */
+    unsigned long blk_ring;     /* MACHINE address of blkdev ring.        */
+    unsigned char cmd_line[1];  /* Variable-length options.               */
 } start_info_t;
 
 /* These flags are passed in the 'flags' field of start_info_t. */
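Note that start_info.shared_info now carries a MACHINE address rather than a hypervisor-installed virtual mapping, so a guest must map the frame itself before using it. A minimal sketch of the consuming side, following the xenolinux pattern adopted later in this same changeset (FIX_SHARED_INFO is added to the fixmap below; until the mapping exists the guest can point HYPERVISOR_shared_info at a zeroed dummy page, as the setup.c change does with empty_zero_page):

    /* Map the shared-info frame ourselves; Xen no longer pre-maps it. */
    set_fixmap(FIX_SHARED_INFO, start_info.shared_info);
    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);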
--- a/xen/include/hypervisor-ifs/network.h	Tue Oct 28 20:59:42 2003 +0000
+++ b/xen/include/hypervisor-ifs/network.h	Wed Oct 29 14:43:22 2003 +0000
@@ -12,6 +12,14 @@
 #ifndef __RING_H__
 #define __RING_H__
 
+/*
+ * Command values for net_io_op()
+ */
+
+#define NETOP_PUSH_BUFFERS  0  /* Notify Xen of new buffers on the rings. */
+#define NETOP_FLUSH_BUFFERS 1  /* Flush all pending request buffers.      */
+
+
 typedef struct tx_req_entry_st
 {
     unsigned short id;
--- a/xen/net/dev.c	Tue Oct 28 20:59:42 2003 +0000
+++ b/xen/net/dev.c	Wed Oct 29 14:43:22 2003 +0000
@@ -1877,7 +1877,7 @@ static int get_tx_bufs(net_vif_t *vif)
         tx = shared_rings->tx_ring[i].req;
         target = VIF_DROP;
 
-        if ( (tx.size < PKT_PROT_LEN) || (tx.size > ETH_FRAME_LEN) )
+        if ( (tx.size <= PKT_PROT_LEN) || (tx.size > ETH_FRAME_LEN) )
         {
             DPRINTK("Bad packet size: %d\n", tx.size);
             __make_tx_response(vif, tx.id, RING_STATUS_BAD_PAGE);
@@ -2019,130 +2019,222 @@ static int get_tx_bufs(net_vif_t *vif)
 }
 
 
+static long get_bufs_from_vif(net_vif_t *vif)
+{
+    net_ring_t *shared_rings;
+    net_idx_t *shared_idxs;
+    unsigned int i, j;
+    rx_req_entry_t rx;
+    unsigned long pte_pfn, buf_pfn;
+    struct pfn_info *pte_page, *buf_page;
+    struct task_struct *p = vif->domain;
+    unsigned long *ptep;
+
+    shared_idxs = vif->shared_idxs;
+    shared_rings = vif->shared_rings;
+
+    /*
+     * PHASE 1 -- TRANSMIT RING
+     */
+
+    if ( get_tx_bufs(vif) )
+    {
+        add_to_net_schedule_list_tail(vif);
+        maybe_schedule_tx_action();
+    }
+
+    /*
+     * PHASE 2 -- RECEIVE RING
+     */
+
+    /*
+     * Collect up new receive buffers. We collect up to the guest OS's new
+     * producer index, but take care not to catch up with our own consumer
+     * index.
+     */
+    j = vif->rx_prod;
+    for ( i = vif->rx_req_cons;
+          (i != shared_idxs->rx_req_prod) &&
+              (((vif->rx_resp_prod-i) & (RX_RING_SIZE-1)) != 1);
+          i = RX_RING_INC(i) )
+    {
+        rx = shared_rings->rx_ring[i].req;
+
+        pte_pfn = rx.addr >> PAGE_SHIFT;
+        pte_page = frame_table + pte_pfn;
+
+        spin_lock_irq(&p->page_lock);
+        if ( (pte_pfn >= max_page) ||
+             ((pte_page->flags & (PG_type_mask | PG_domain_mask)) !=
+              (PGT_l1_page_table | p->domain)) )
+        {
+            DPRINTK("Bad page frame for ppte %d,%08lx,%08lx,%08lx\n",
+                    p->domain, pte_pfn, max_page, pte_page->flags);
+            spin_unlock_irq(&p->page_lock);
+            make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
+            continue;
+        }
+
+        ptep = map_domain_mem(rx.addr);
+
+        if ( !(*ptep & _PAGE_PRESENT) )
+        {
+            DPRINTK("Invalid PTE passed down (not present)\n");
+            make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
+            goto rx_unmap_and_continue;
+        }
+
+        buf_pfn = *ptep >> PAGE_SHIFT;
+        buf_page = frame_table + buf_pfn;
+
+        if ( ((buf_page->flags & (PG_type_mask | PG_domain_mask)) !=
+              (PGT_writeable_page | p->domain)) ||
+             (buf_page->tot_count != 1) )
+        {
+            DPRINTK("Need a mapped-once writeable page (%ld/%ld/%08lx)\n",
+                    buf_page->type_count, buf_page->tot_count,
+                    buf_page->flags);
+            make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
+            goto rx_unmap_and_continue;
+        }
+
+        /*
+         * The pte they passed was good, so take it away from them. We also
+         * lock down the page-table page, so it doesn't go away.
+         */
+        get_page_type(pte_page);
+        get_page_tot(pte_page);
+        *ptep &= ~_PAGE_PRESENT;
+        buf_page->flags = buf_page->type_count = buf_page->tot_count = 0;
+        list_del(&buf_page->list);
+
+        vif->rx_shadow_ring[j].id = rx.id;
+        vif->rx_shadow_ring[j].pte_ptr = rx.addr;
+        vif->rx_shadow_ring[j].buf_pfn = buf_pfn;
+        vif->rx_shadow_ring[j].flush_count = (unsigned short)
+            atomic_read(&tlb_flush_count[smp_processor_id()]);
+        j = RX_RING_INC(j);
+
+    rx_unmap_and_continue:
+        unmap_domain_mem(ptep);
+        spin_unlock_irq(&p->page_lock);
+    }
+
+    vif->rx_req_cons = i;
+
+    if ( vif->rx_prod != j )
+    {
+        smp_mb(); /* Let other CPUs see new descriptors first. */
+        vif->rx_prod = j;
+    }
+
+    return 0;
+}
+
+
+long flush_bufs_for_vif(net_vif_t *vif)
+{
+    int i;
+    unsigned long *pte, flags;
+    struct pfn_info *page;
+    struct task_struct *p = vif->domain;
+    rx_shadow_entry_t *rx;
+    net_ring_t *shared_rings = vif->shared_rings;
+    net_idx_t *shared_idxs = vif->shared_idxs;
+
+    /* Return any outstanding receive buffers to the guest OS. */
+    spin_lock_irqsave(&p->page_lock, flags);
+    for ( i = vif->rx_req_cons;
+          (i != shared_idxs->rx_req_prod) &&
+              (((vif->rx_resp_prod-i) & (RX_RING_SIZE-1)) != 1);
+          i = RX_RING_INC(i) )
+    {
+        make_rx_response(vif, shared_rings->rx_ring[i].req.id, 0,
+                         RING_STATUS_DROPPED, 0);
+    }
+    vif->rx_req_cons = i;
+    for ( i = vif->rx_cons; i != vif->rx_prod; i = RX_RING_INC(i) )
+    {
+        rx = &vif->rx_shadow_ring[i];
+
+        /* Release the page-table page. */
+        page = frame_table + (rx->pte_ptr >> PAGE_SHIFT);
+        put_page_type(page);
+        put_page_tot(page);
+
+        /* Give the buffer page back to the domain. */
+        page = frame_table + rx->buf_pfn;
+        list_add(&page->list, &p->pg_head);
+        page->flags = vif->domain->domain;
+
+        /* Patch up the PTE if it hasn't changed under our feet. */
+        pte = map_domain_mem(rx->pte_ptr);
+        if ( !(*pte & _PAGE_PRESENT) )
+        {
+            *pte = (rx->buf_pfn<<PAGE_SHIFT) | (*pte & ~PAGE_MASK) |
+                _PAGE_RW | _PAGE_PRESENT;
+            page->flags |= PGT_writeable_page | PG_need_flush;
+            page->type_count = page->tot_count = 1;
+        }
+        unmap_domain_mem(pte);
+
+        make_rx_response(vif, rx->id, 0, RING_STATUS_DROPPED, 0);
+    }
+    vif->rx_cons = i;
+    spin_unlock_irqrestore(&p->page_lock, flags);
+
+    /*
+     * Flush pending transmit buffers. The guest may still have to wait for
+     * buffers that are queued at a physical NIC.
+     */
+    spin_lock_irqsave(&vif->tx_lock, flags);
+    for ( i = vif->tx_req_cons;
+          (i != shared_idxs->tx_req_prod) &&
+              (((vif->tx_resp_prod-i) & (TX_RING_SIZE-1)) != 1);
+          i = TX_RING_INC(i) )
+    {
+        __make_tx_response(vif, shared_rings->tx_ring[i].req.id,
+                           RING_STATUS_DROPPED);
+    }
+    vif->tx_req_cons = i;
+    spin_unlock_irqrestore(&vif->tx_lock, flags);
+
+    return 0;
+}
+
+
 /*
- * do_net_update:
+ * do_net_io_op:
  *
  * Called from guest OS to notify updates to its transmit and/or receive
  * descriptor rings.
  */
-
-long do_net_update(void)
+long do_net_io_op(unsigned int op, unsigned int idx)
 {
-    net_ring_t *shared_rings;
     net_vif_t *vif;
-    net_idx_t *shared_idxs;
-    unsigned int i, j, idx;
-    rx_req_entry_t rx;
-    unsigned long pte_pfn, buf_pfn;
-    struct pfn_info *pte_page, *buf_page;
-    unsigned long *ptep;
+    long ret;
 
     perfc_incr(net_hypercalls);
 
-    for ( idx = 0; idx < MAX_DOMAIN_VIFS; idx++ )
-    {
-        if ( (vif = current->net_vif_list[idx]) == NULL )
-            break;
-
-        shared_idxs = vif->shared_idxs;
-        shared_rings = vif->shared_rings;
-
-        /*
-         * PHASE 1 -- TRANSMIT RING
-         */
-
-        if ( get_tx_bufs(vif) )
-        {
-            add_to_net_schedule_list_tail(vif);
-            maybe_schedule_tx_action();
-        }
-
-        /*
-         * PHASE 2 -- RECEIVE RING
-         */
-
-        /*
-         * Collect up new receive buffers. We collect up to the guest OS's
-         * new producer index, but take care not to catch up with our own
-         * consumer index.
-         */
-        j = vif->rx_prod;
-        for ( i = vif->rx_req_cons;
-              (i != shared_idxs->rx_req_prod) &&
-                  (((vif->rx_resp_prod-i) & (RX_RING_SIZE-1)) != 1);
-              i = RX_RING_INC(i) )
-        {
-            rx = shared_rings->rx_ring[i].req;
+    if ( (vif = current->net_vif_list[idx]) == NULL )
+        return -EINVAL;
 
-            pte_pfn = rx.addr >> PAGE_SHIFT;
-            pte_page = frame_table + pte_pfn;
-
-            spin_lock_irq(&current->page_lock);
-            if ( (pte_pfn >= max_page) ||
-                 ((pte_page->flags & (PG_type_mask | PG_domain_mask)) !=
-                  (PGT_l1_page_table | current->domain)) )
-            {
-                DPRINTK("Bad page frame for ppte %d,%08lx,%08lx,%08lx\n",
-                        current->domain, pte_pfn, max_page, pte_page->flags);
-                spin_unlock_irq(&current->page_lock);
-                make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
-                continue;
-            }
-
-            ptep = map_domain_mem(rx.addr);
-
-            if ( !(*ptep & _PAGE_PRESENT) )
-            {
-                DPRINTK("Invalid PTE passed down (not present)\n");
-                make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
-                goto rx_unmap_and_continue;
-            }
-
-            buf_pfn = *ptep >> PAGE_SHIFT;
-            buf_page = frame_table + buf_pfn;
+    switch ( op )
+    {
+    case NETOP_PUSH_BUFFERS:
+        ret = get_bufs_from_vif(vif);
+        break;
 
-            if ( ((buf_page->flags & (PG_type_mask | PG_domain_mask)) !=
-                  (PGT_writeable_page | current->domain)) ||
-                 (buf_page->tot_count != 1) )
-            {
-                DPRINTK("Need a mapped-once writeable page (%ld/%ld/%08lx)\n",
-                        buf_page->type_count, buf_page->tot_count, buf_page->flags);
-                make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
-                goto rx_unmap_and_continue;
-            }
-
-            /*
-             * The pte they passed was good, so take it away from them. We
-             * also lock down the page-table page, so it doesn't go away.
-             */
-            get_page_type(pte_page);
-            get_page_tot(pte_page);
-            *ptep &= ~_PAGE_PRESENT;
-            buf_page->flags = buf_page->type_count = buf_page->tot_count = 0;
-            list_del(&buf_page->list);
+    case NETOP_FLUSH_BUFFERS:
+        ret = flush_bufs_for_vif(vif);
+        break;
 
-            vif->rx_shadow_ring[j].id = rx.id;
-            vif->rx_shadow_ring[j].pte_ptr = rx.addr;
-            vif->rx_shadow_ring[j].buf_pfn = buf_pfn;
-            vif->rx_shadow_ring[j].flush_count = (unsigned short)
-                atomic_read(&tlb_flush_count[smp_processor_id()]);
-            j = RX_RING_INC(j);
-
-        rx_unmap_and_continue:
-            unmap_domain_mem(ptep);
-            spin_unlock_irq(&current->page_lock);
-        }
-
-        vif->rx_req_cons = i;
-
-        if ( vif->rx_prod != j )
-        {
-            smp_mb(); /* Let other CPUs see new descriptors first. */
-            vif->rx_prod = j;
-        }
+    default:
+        ret = -EINVAL;
+        break;
     }
 
-    return 0;
+    return ret;
 }
 
--- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c	Tue Oct 28 20:59:42 2003 +0000
+++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c	Wed Oct 29 14:43:22 2003 +0000
@@ -40,7 +40,7 @@ static inline void signal_requests_to_xe
 {
     DISABLE_SCATTERGATHER();
     blk_ring->req_prod = req_prod;
-    HYPERVISOR_block_io_op();
+    HYPERVISOR_block_io_op(BLKOP_PUSH_BUFFERS);
 }
 
--- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c	Tue Oct 28 20:59:42 2003 +0000
+++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c	Wed Oct 29 14:43:22 2003 +0000
@@ -249,7 +249,7 @@ static void network_alloc_rx_buffers(str
     /* Batch Xen notifications. */
     if ( np->rx_bufs_to_notify > (RX_MAX_ENTRIES/4) )
     {
-        HYPERVISOR_net_update();
+        HYPERVISOR_net_io_op(NETOP_PUSH_BUFFERS, np->idx);
         np->rx_bufs_to_notify = 0;
     }
 }
@@ -322,7 +322,7 @@ static int network_start_xmit(struct sk_
     /* Only notify Xen if there are no outstanding responses. */
     mb();
     if ( np->net_idx->tx_resp_prod == i )
-        HYPERVISOR_net_update();
+        HYPERVISOR_net_io_op(NETOP_PUSH_BUFFERS, np->idx);
 
     return 0;
 }
--- a/xenolinux-2.4.22-sparse/arch/xeno/kernel/head.S	Tue Oct 28 20:59:42 2003 +0000
+++ b/xenolinux-2.4.22-sparse/arch/xeno/kernel/head.S	Wed Oct 29 14:43:22 2003 +0000
@@ -9,9 +9,8 @@
 #include <asm/desc.h>
 
 /* Offsets in start_info structure */
-#define SHARED_INFO  4
-#define MOD_START   12
-#define MOD_LEN     16
+#define MOD_START    4
+#define MOD_LEN      8
 
 startup_32:
         cld
@@ -35,19 +34,16 @@ 1:      sub $4,%eax
 
         /* Clear BSS first so that there are no surprises... */
 2:      xorl %eax,%eax
-	movl $SYMBOL_NAME(__bss_start),%edi
-	movl $SYMBOL_NAME(_end),%ecx
-	subl %edi,%ecx
-	rep stosb
+        movl $SYMBOL_NAME(__bss_start),%edi
+        movl $SYMBOL_NAME(_end),%ecx
+        subl %edi,%ecx
+        rep stosb
 
         /* Copy the necessary stuff from start_info structure. */
-        /* We need to copy shared_info early, so that sti/cli work */
-        mov  SHARED_INFO(%esi),%eax
-        mov  %eax,SYMBOL_NAME(HYPERVISOR_shared_info)
         mov  $SYMBOL_NAME(start_info_union),%edi
         mov  $128,%ecx
         rep movsl
-        
+
         jmp SYMBOL_NAME(start_kernel)
 
 ENTRY(stack_start)
--- a/xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c	Tue Oct 28 20:59:42 2003 +0000
+++ b/xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c	Wed Oct 29 14:43:22 2003 +0000
@@ -46,7 +46,11 @@
 #include <asm/hypervisor.h>
 #include <asm/hypervisor-ifs/dom0_ops.h>
 
-shared_info_t *HYPERVISOR_shared_info;
+/*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+shared_info_t *HYPERVISOR_shared_info = empty_zero_page;
 
 unsigned long *phys_to_machine_mapping;
 
--- a/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c	Tue Oct 28 20:59:42 2003 +0000
+++ b/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c	Wed Oct 29 14:43:22 2003 +0000
@@ -43,50 +43,50 @@ static unsigned long totalhigh_pages;
 
 int do_check_pgt_cache(int low, int high)
 {
-	int freed = 0;
-	if(pgtable_cache_size > high) {
-		do {
+    int freed = 0;
+    if(pgtable_cache_size > high) {
+        do {
             if (!QUICKLIST_EMPTY(pgd_quicklist)) {
-			free_pgd_slow(get_pgd_fast());
-			freed++;
-		}
+                free_pgd_slow(get_pgd_fast());
+                freed++;
+            }
             if (!QUICKLIST_EMPTY(pte_quicklist)) {
-			pte_free_slow(pte_alloc_one_fast(NULL, 0));
-			freed++;
-		}
-	} while(pgtable_cache_size > low);
-	}
-	return freed;
+                pte_free_slow(pte_alloc_one_fast(NULL, 0));
+                freed++;
+            }
+        } while(pgtable_cache_size > low);
+    }
+    return freed;
 }
 
 void show_mem(void)
 {
-	int i, total = 0, reserved = 0;
-	int shared = 0, cached = 0;
-	int highmem = 0;
+    int i, total = 0, reserved = 0;
+    int shared = 0, cached = 0;
+    int highmem = 0;
 
-	printk("Mem-info:\n");
-	show_free_areas();
-	printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
-	i = max_mapnr;
-	while (i-- > 0) {
-		total++;
-		if (PageHighMem(mem_map+i))
-			highmem++;
-		if (PageReserved(mem_map+i))
-			reserved++;
-		else if (PageSwapCache(mem_map+i))
-			cached++;
-		else if (page_count(mem_map+i))
-			shared += page_count(mem_map+i) - 1;
-	}
-	printk("%d pages of RAM\n", total);
-	printk("%d pages of HIGHMEM\n",highmem);
-	printk("%d reserved pages\n",reserved);
-	printk("%d pages shared\n",shared);
-	printk("%d pages swap cached\n",cached);
-	printk("%ld pages in page table cache\n",pgtable_cache_size);
-	show_buffers();
+    printk("Mem-info:\n");
+    show_free_areas();
+    printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+    i = max_mapnr;
+    while (i-- > 0) {
+        total++;
+        if (PageHighMem(mem_map+i))
+            highmem++;
+        if (PageReserved(mem_map+i))
+            reserved++;
+        else if (PageSwapCache(mem_map+i))
+            cached++;
+        else if (page_count(mem_map+i))
+            shared += page_count(mem_map+i) - 1;
+    }
+    printk("%d pages of RAM\n", total);
+    printk("%d pages of HIGHMEM\n",highmem);
+    printk("%d reserved pages\n",reserved);
+    printk("%d pages shared\n",shared);
+    printk("%d pages swap cached\n",cached);
+    printk("%ld pages in page table cache\n",pgtable_cache_size);
+    show_buffers();
 }
 
 /* References to section boundaries */
@@ -95,118 +95,118 @@ extern char _text, _etext, _edata, __bss
 extern char __init_begin, __init_end;
 
 static inline void set_pte_phys (unsigned long vaddr,
-			unsigned long phys, pgprot_t flags)
+                                 unsigned long phys, pgprot_t flags)
 {
-	pgprot_t prot;
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte;
+    pgprot_t prot;
+    pgd_t *pgd;
+    pmd_t *pmd;
+    pte_t *pte;
 
-	pgd = init_mm.pgd + __pgd_offset(vaddr);
-	if (pgd_none(*pgd)) {
-		printk("PAE BUG #00!\n");
-		return;
-	}
-	pmd = pmd_offset(pgd, vaddr);
-	if (pmd_none(*pmd)) {
-		printk("PAE BUG #01!\n");
-		return;
-	}
-	pte = pte_offset(pmd, vaddr);
+    pgd = init_mm.pgd + __pgd_offset(vaddr);
+    if (pgd_none(*pgd)) {
+        printk("PAE BUG #00!\n");
+        return;
+    }
+    pmd = pmd_offset(pgd, vaddr);
+    if (pmd_none(*pmd)) {
+        printk("PAE BUG #01!\n");
+        return;
+    }
+    pte = pte_offset(pmd, vaddr);
 
-	if (pte_val(*pte))
-		pte_ERROR(*pte);
+    if (pte_val(*pte))
+        pte_ERROR(*pte);
 
-	pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
+    pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
 
-	/* We queue directly, avoiding hidden phys->machine translation. */
-	queue_l1_entry_update(pte, phys | pgprot_val(prot));
+    /* We queue directly, avoiding hidden phys->machine translation. */
+    queue_l1_entry_update(pte, phys | pgprot_val(prot));
 
-	/*
-	 * It's enough to flush this one mapping.
-	 * (PGE mappings get flushed as well)
-	 */
-	__flush_tlb_one(vaddr);
+    /*
+     * It's enough to flush this one mapping.
+     * (PGE mappings get flushed as well)
+     */
+    __flush_tlb_one(vaddr);
 }
 
 void __set_fixmap (enum fixed_addresses idx, unsigned long phys, 
                    pgprot_t flags)
 {
-	unsigned long address = __fix_to_virt(idx);
+    unsigned long address = __fix_to_virt(idx);
 
-	if (idx >= __end_of_fixed_addresses) {
-		printk("Invalid __set_fixmap\n");
-		return;
-	}
-	set_pte_phys(address, phys, flags);
+    if (idx >= __end_of_fixed_addresses) {
+        printk("Invalid __set_fixmap\n");
+        return;
+    }
+    set_pte_phys(address, phys, flags);
 }
 
 static void __init fixrange_init (unsigned long start, 
                                   unsigned long end, pgd_t *pgd_base)
 {
-	pgd_t *pgd, *kpgd;
-	pmd_t *pmd, *kpmd;
-	pte_t *pte, *kpte;
-	int i, j;
-	unsigned long vaddr;
+    pgd_t *pgd, *kpgd;
+    pmd_t *pmd, *kpmd;
+    pte_t *pte, *kpte;
+    int i, j;
+    unsigned long vaddr;
 
-	vaddr = start;
-	i = __pgd_offset(vaddr);
-	j = __pmd_offset(vaddr);
-	pgd = pgd_base + i;
+    vaddr = start;
+    i = __pgd_offset(vaddr);
+    j = __pmd_offset(vaddr);
+    pgd = pgd_base + i;
 
-	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+    for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
 #if CONFIG_X86_PAE
-		if (pgd_none(*pgd)) {
-			pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-			set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
-			if (pmd != pmd_offset(pgd, 0))
-				printk("PAE BUG #02!\n");
-		}
-		pmd = pmd_offset(pgd, vaddr);
+        if (pgd_none(*pgd)) {
+            pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+            set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
+            if (pmd != pmd_offset(pgd, 0))
+                printk("PAE BUG #02!\n");
+        }
+        pmd = pmd_offset(pgd, vaddr);
 #else
-		pmd = (pmd_t *)pgd;
+        pmd = (pmd_t *)pgd;
 #endif
-		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
-			if (pmd_none(*pmd)) {
-				pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-				clear_page(pte);
-				kpgd = pgd_offset_k((unsigned long)pte);
-				kpmd = pmd_offset(kpgd, (unsigned long)pte);
-				kpte = pte_offset(kpmd, (unsigned long)pte);
-				queue_l1_entry_update(kpte,
+        for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
+            if (pmd_none(*pmd)) {
+                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+                clear_page(pte);
+                kpgd = pgd_offset_k((unsigned long)pte);
+                kpmd = pmd_offset(kpgd, (unsigned long)pte);
+                kpte = pte_offset(kpmd, (unsigned long)pte);
+                queue_l1_entry_update(kpte,
                                       (*(unsigned long *)kpte)&~_PAGE_RW);
 
-			set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
-		}
-		vaddr += PMD_SIZE;
-	}
-	j = 0;
-	}
+                set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
+            }
+            vaddr += PMD_SIZE;
+        }
+        j = 0;
+    }
 
-	XENO_flush_page_update_queue();
+    XENO_flush_page_update_queue();
 }
 
 
 static void __init zone_sizes_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
-	unsigned int max_dma, high, low;
+    unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+    unsigned int max_dma, high, low;
 
-	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-	low = max_low_pfn;
-	high = highend_pfn;
+    max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+    low = max_low_pfn;
+    high = highend_pfn;
 
-	if (low < max_dma)
-		zones_size[ZONE_DMA] = low;
-	else {
-		zones_size[ZONE_DMA] = max_dma;
-		zones_size[ZONE_NORMAL] = low - max_dma;
+    if (low < max_dma)
+        zones_size[ZONE_DMA] = low;
+    else {
+        zones_size[ZONE_DMA] = max_dma;
+        zones_size[ZONE_NORMAL] = low - max_dma;
 #ifdef CONFIG_HIGHMEM
-		zones_size[ZONE_HIGHMEM] = high - low;
+        zones_size[ZONE_HIGHMEM] = high - low;
 #endif
-	}
-	free_area_init(zones_size);
+    }
+    free_area_init(zones_size);
 }
 
 /*
@@ -218,60 +218,65 @@ static void __init zone_sizes_init(void)
  */
 void __init paging_init(void)
 {
-	unsigned long vaddr;
+    unsigned long vaddr;
 
-	zone_sizes_init();
+    zone_sizes_init();
 
-	/*
-	 * Fixed mappings, only the page table structure has to be created -
-	 * mappings will be set by set_fixmap():
-	 */
-	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);
+    /*
+     * Fixed mappings, only the page table structure has to be created -
+     * mappings will be set by set_fixmap():
+     */
+    vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+    fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);
 
-	/* Cheesy: this can probably be moved to the blkdev driver. */
-	set_fixmap(FIX_BLKRING_BASE, start_info.blk_ring);
+    /* Cheesy: this can probably be moved to the blkdev driver. */
+    set_fixmap(FIX_BLKRING_BASE, start_info.blk_ring);
+
+    /* Switch to the real shared_info page, and clear the dummy page. */
+    set_fixmap(FIX_SHARED_INFO, start_info.shared_info);
+    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+    memset(empty_zero_page, 0, sizeof(empty_zero_page));
 
 #ifdef CONFIG_HIGHMEM
 #error
-	kmap_init();
+    kmap_init();
 #endif
 }
 
 static inline int page_is_ram (unsigned long pagenr)
 {
-	return 1;
+    return 1;
 }
 
 #ifdef CONFIG_HIGHMEM
 void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
-	if (!page_is_ram(pfn)) {
-		SetPageReserved(page);
-		return;
-	}
+    if (!page_is_ram(pfn)) {
+        SetPageReserved(page);
+        return;
+    }
 
-	if (bad_ppro && page_kills_ppro(pfn)) {
-		SetPageReserved(page);
-		return;
-	}
+    if (bad_ppro && page_kills_ppro(pfn)) {
+        SetPageReserved(page);
+        return;
+    }
 
-	ClearPageReserved(page);
-	set_bit(PG_highmem, &page->flags);
-	atomic_set(&page->count, 1);
-	__free_page(page);
-	totalhigh_pages++;
+    ClearPageReserved(page);
+    set_bit(PG_highmem, &page->flags);
+    atomic_set(&page->count, 1);
+    __free_page(page);
+    totalhigh_pages++;
 }
 #endif /* CONFIG_HIGHMEM */
 
 static void __init set_max_mapnr_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-	highmem_start_page = mem_map + highstart_pfn;
-	max_mapnr = num_physpages = highend_pfn;
-	num_mappedpages = max_low_pfn;
+    highmem_start_page = mem_map + highstart_pfn;
+    max_mapnr = num_physpages = highend_pfn;
+    num_mappedpages = max_low_pfn;
 #else
-	max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
+    max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
 #endif
 }
 
@@ -279,112 +284,112 @@ static int __init free_pages_init(void)
 {
 #ifdef CONFIG_HIGHMEM
 #error Where is this supposed to be initialised?
-	int bad_ppro;
+    int bad_ppro;
 #endif
-	int reservedpages, pfn;
+    int reservedpages, pfn;
 
-	/* this will put all low memory onto the freelists */
-	totalram_pages += free_all_bootmem();
+    /* this will put all low memory onto the freelists */
+    totalram_pages += free_all_bootmem();
 
-	reservedpages = 0;
-	for (pfn = 0; pfn < max_low_pfn; pfn++) {
-		/*
-		 * Only count reserved RAM pages
-		 */
-		if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
-			reservedpages++;
-	}
+    reservedpages = 0;
+    for (pfn = 0; pfn < max_low_pfn; pfn++) {
+        /*
+         * Only count reserved RAM pages
+         */
+        if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
+            reservedpages++;
+    }
 #ifdef CONFIG_HIGHMEM
-	for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
-		one_highpage_init((struct page *) (mem_map + pfn), pfn, bad_ppro);
-	totalram_pages += totalhigh_pages;
+    for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
+        one_highpage_init((struct page *) (mem_map + pfn), pfn, bad_ppro);
+    totalram_pages += totalhigh_pages;
 #endif
-	return reservedpages;
+    return reservedpages;
 }
 
 void __init mem_init(void)
 {
-	int codesize, reservedpages, datasize, initsize;
+    int codesize, reservedpages, datasize, initsize;
 
-	if (!mem_map)
-		BUG();
+    if (!mem_map)
+        BUG();
 
-	set_max_mapnr_init();
+    set_max_mapnr_init();
 
-	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+    high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
-	/* clear the zero-page */
-	memset(empty_zero_page, 0, PAGE_SIZE);
+    /* clear the zero-page */
+    memset(empty_zero_page, 0, PAGE_SIZE);
 
-	reservedpages = free_pages_init();
+    reservedpages = free_pages_init();
 
-	codesize = (unsigned long) &_etext - (unsigned long) &_text;
-	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
-	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+    codesize = (unsigned long) &_etext - (unsigned long) &_text;
+    datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+    initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
 
-	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
-		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		max_mapnr << (PAGE_SHIFT-10),
-		codesize >> 10,
-		reservedpages << (PAGE_SHIFT-10),
-		datasize >> 10,
-		initsize >> 10,
-		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
-	       );
+    printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+           (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+           max_mapnr << (PAGE_SHIFT-10),
+           codesize >> 10,
+           reservedpages << (PAGE_SHIFT-10),
+           datasize >> 10,
+           initsize >> 10,
+           (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
+           );
 
     boot_cpu_data.wp_works_ok = 1;
 }
 
 void free_initmem(void)
 {
-	unsigned long addr;
+    unsigned long addr;
 
-	addr = (unsigned long)(&__init_begin);
-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		set_page_count(virt_to_page(addr), 1);
-		free_page(addr);
-		totalram_pages++;
-	}
-	printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+    addr = (unsigned long)(&__init_begin);
+    for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+        ClearPageReserved(virt_to_page(addr));
+        set_page_count(virt_to_page(addr), 1);
+        free_page(addr);
+        totalram_pages++;
+    }
+    printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (start < end)
-		printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		set_page_count(virt_to_page(start), 1);
-		free_page(start);
-		totalram_pages++;
-	}
+    if (start < end)
+        printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+    for (; start < end; start += PAGE_SIZE) {
+        ClearPageReserved(virt_to_page(start));
+        set_page_count(virt_to_page(start), 1);
+        free_page(start);
+        totalram_pages++;
+    }
 }
 #endif
 
 void si_meminfo(struct sysinfo *val)
 {
-	val->totalram = totalram_pages;
-	val->sharedram = 0;
-	val->freeram = nr_free_pages();
-	val->bufferram = atomic_read(&buffermem_pages);
-	val->totalhigh = totalhigh_pages;
-	val->freehigh = nr_free_highpages();
-	val->mem_unit = PAGE_SIZE;
-	return;
+    val->totalram = totalram_pages;
+    val->sharedram = 0;
+    val->freeram = nr_free_pages();
+    val->bufferram = atomic_read(&buffermem_pages);
+    val->totalhigh = totalhigh_pages;
+    val->freehigh = nr_free_highpages();
+    val->mem_unit = PAGE_SIZE;
+    return;
 }
 
 #if defined(CONFIG_X86_PAE)
 struct kmem_cache_s *pae_pgd_cachep;
 void __init pgtable_cache_init(void)
 {
-	/*
-	 * PAE pgds must be 16-byte aligned:
+    /*
+     * PAE pgds must be 16-byte aligned:
      */
-	pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
-		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
-	if (!pae_pgd_cachep)
-		panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
+    pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
+        SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
+    if (!pae_pgd_cachep)
+        panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
 }
 #endif /* CONFIG_X86_PAE */
--- a/xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h	Tue Oct 28 20:59:42 2003 +0000
+++ b/xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h	Wed Oct 29 14:43:22 2003 +0000
@@ -47,15 +47,16 @@ enum fixed_addresses {
     FIX_NETRING1_BASE,
     FIX_NETRING2_BASE,
     FIX_NETRING3_BASE,
+    FIX_SHARED_INFO,
 
 #ifdef CONFIG_VGA_CONSOLE
 #define NR_FIX_BTMAPS 32 /* 128KB For the Dom0 VGA Console A0000-C0000 */
 #else
-#define NR_FIX_BTMAPS 1  /* have on page incase anyone wants it in future */
+#define NR_FIX_BTMAPS 1  /* in case anyone wants it in future... */
 #endif
     FIX_BTMAP_END,
     FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
-    /* our bt_ioremap is permenant unlike other architectures */
+    /* our bt_ioremap is permanent, unlike other architectures */
 
     __end_of_permanent_fixed_addresses,
     __end_of_fixed_addresses = __end_of_permanent_fixed_addresses
--- a/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h	Tue Oct 28 20:59:42 2003 +0000
+++ b/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h	Wed Oct 29 14:43:22 2003 +0000
@@ -219,12 +219,13 @@ static inline int HYPERVISOR_set_callbac
     return ret;
 }
 
-static inline int HYPERVISOR_net_update(void)
+static inline int HYPERVISOR_net_io_op(unsigned int op, unsigned int idx)
 {
     int ret;
     __asm__ __volatile__ (
         TRAP_INSTR
-        : "=a" (ret) : "0" (__HYPERVISOR_net_update) );
+        : "=a" (ret) : "0" (__HYPERVISOR_net_io_op),
+          "b" (op), "c" (idx) );
 
     return ret;
 }
@@ -281,12 +282,13 @@ static inline int HYPERVISOR_network_op(
     return ret;
 }
 
-static inline int HYPERVISOR_block_io_op(void)
+static inline int HYPERVISOR_block_io_op(unsigned int op)
 {
     int ret;
     __asm__ __volatile__ (
         TRAP_INSTR
-        : "=a" (ret) : "0" (__HYPERVISOR_block_io_op) );
+        : "=a" (ret) : "0" (__HYPERVISOR_block_io_op),
+          "b" (op) );
 
     return ret;
 }