direct-io.hg
changeset 1379:6364e3c99d29
bitkeeper revision 1.891.1.16 (40a38fb5auV2wZtbB0nLg2hIQ75DjA)
Optimisations for new network IO model. Much better receive
performance.
| author | kaf24@scramble.cl.cam.ac.uk |
|---|---|
| date | Thu May 13 15:09:41 2004 +0000 (2004-05-13) |
parents | 623355fbbab9 |
children | d2776001835f |
files | xen/arch/i386/entry.S xen/common/dom_mem_ops.c xen/common/domain.c xen/common/memory.c xen/include/hypervisor-ifs/hypervisor-if.h xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h |
line diff
1.1 --- a/xen/arch/i386/entry.S Thu May 13 09:59:09 2004 +0000 1.2 +++ b/xen/arch/i386/entry.S Thu May 13 15:09:41 2004 +0000 1.3 @@ -183,20 +183,22 @@ do_multicall: 1.4 multicall_loop: 1.5 pushl %ecx 1.6 multicall_fault1: 1.7 - pushl 20(%ebx) 1.8 + pushl 20(%ebx) # args[4] 1.9 multicall_fault2: 1.10 - pushl 16(%ebx) 1.11 + pushl 16(%ebx) # args[3] 1.12 multicall_fault3: 1.13 - pushl 12(%ebx) 1.14 + pushl 12(%ebx) # args[2] 1.15 multicall_fault4: 1.16 - pushl 8(%ebx) 1.17 + pushl 8(%ebx) # args[1] 1.18 multicall_fault5: 1.19 - pushl 4(%ebx) 1.20 + pushl 4(%ebx) # args[0] 1.21 multicall_fault6: 1.22 - movl (%ebx),%eax 1.23 + movl (%ebx),%eax # op 1.24 andl $255,%eax 1.25 call *SYMBOL_NAME(hypervisor_call_table)(,%eax,4) 1.26 multicall_return_from_call: 1.27 +multicall_fault7: 1.28 + movl %eax,24(%ebx) # args[5] == result 1.29 addl $20,%esp 1.30 popl %ecx 1.31 addl $(ARGS_PER_MULTICALL_ENTRY*4),%ebx 1.32 @@ -745,6 +747,7 @@ ENTRY(hypervisor_call_table) 1.33 .long SYMBOL_NAME(do_xen_version) 1.34 .long SYMBOL_NAME(do_console_io) 1.35 .long SYMBOL_NAME(do_physdev_op) 1.36 + .long SYMBOL_NAME(do_update_va_mapping_otherdomain) /* 25 */ 1.37 .rept NR_syscalls-((.-hypervisor_call_table)/4) 1.38 .long SYMBOL_NAME(do_ni_syscall) 1.39 .endr
2.1 --- a/xen/common/dom_mem_ops.c Thu May 13 09:59:09 2004 +0000 2.2 +++ b/xen/common/dom_mem_ops.c Thu May 13 15:09:41 2004 +0000 2.3 @@ -18,24 +18,22 @@ 2.4 2.5 static long alloc_dom_mem(struct task_struct *p, reservation_increase_t op) 2.6 { 2.7 - struct pfn_info *page; 2.8 - unsigned long mpfn; /* machine frame number of current page */ 2.9 - void *va; /* Xen-usable mapping of current page */ 2.10 - unsigned long i; 2.11 + struct pfn_info *page; 2.12 + unsigned long i; 2.13 + 2.14 + /* Leave some slack pages; e.g., for the network. */ 2.15 + if ( unlikely(free_pfns < (op.size + (SLACK_DOMAIN_MEM_KILOBYTES >> 2.16 + (PAGE_SHIFT-10)))) ) 2.17 + { 2.18 + DPRINTK("Not enough slack: %u %u\n", 2.19 + free_pfns, 2.20 + SLACK_DOMAIN_MEM_KILOBYTES >> (PAGE_SHIFT-10)); 2.21 + return 0; 2.22 + } 2.23 2.24 for ( i = 0; i < op.size; i++ ) 2.25 { 2.26 - /* Leave some slack pages; e.g., for the network. */ 2.27 - if ( unlikely(free_pfns < (SLACK_DOMAIN_MEM_KILOBYTES >> 2.28 - (PAGE_SHIFT-10))) ) 2.29 - { 2.30 - DPRINTK("Not enough slack: %u %u\n", 2.31 - free_pfns, 2.32 - SLACK_DOMAIN_MEM_KILOBYTES >> (PAGE_SHIFT-10)); 2.33 - break; 2.34 - } 2.35 - 2.36 - /* NB. 'alloc_domain_page' does limit checking on pages per domain. */ 2.37 + /* NB. 'alloc_domain_page' does limit-checking on pages per domain. */ 2.38 if ( unlikely((page = alloc_domain_page(p)) == NULL) ) 2.39 { 2.40 DPRINTK("Could not allocate a frame\n"); 2.41 @@ -43,14 +41,8 @@ static long alloc_dom_mem(struct task_st 2.42 } 2.43 2.44 /* Inform the domain of the new page's machine address. */ 2.45 - mpfn = (unsigned long)(page - frame_table); 2.46 - copy_to_user(op.pages, &mpfn, sizeof(mpfn)); 2.47 - op.pages++; 2.48 - 2.49 - /* Zero out the page to prevent information leakage. 
*/ 2.50 - va = map_domain_mem(mpfn << PAGE_SHIFT); 2.51 - memset(va, 0, PAGE_SIZE); 2.52 - unmap_domain_mem(va); 2.53 + if ( unlikely(put_user(page_to_pfn(page), &op.pages[i]) != 0) ) 2.54 + break; 2.55 } 2.56 2.57 return i; 2.58 @@ -58,22 +50,21 @@ static long alloc_dom_mem(struct task_st 2.59 2.60 static long free_dom_mem(struct task_struct *p, reservation_decrease_t op) 2.61 { 2.62 - struct pfn_info *page; 2.63 - unsigned long mpfn; /* machine frame number of current page */ 2.64 - unsigned long i; 2.65 - long rc = 0; 2.66 - int need_flush = 0; 2.67 + struct pfn_info *page; 2.68 + unsigned long i, mpfn; 2.69 + long rc = 0; 2.70 2.71 for ( i = 0; i < op.size; i++ ) 2.72 { 2.73 - copy_from_user(&mpfn, op.pages, sizeof(mpfn)); 2.74 - op.pages++; 2.75 - if ( mpfn >= max_page ) 2.76 + if ( unlikely(get_user(mpfn, &op.pages[i]) != 0) ) 2.77 + break; 2.78 + 2.79 + if ( unlikely(mpfn >= max_page) ) 2.80 { 2.81 DPRINTK("Domain %llu page number out of range (%08lx>=%08lx)\n", 2.82 p->domain, mpfn, max_page); 2.83 rc = -EINVAL; 2.84 - goto out; 2.85 + break; 2.86 } 2.87 2.88 page = &frame_table[mpfn]; 2.89 @@ -81,7 +72,7 @@ static long free_dom_mem(struct task_str 2.90 { 2.91 DPRINTK("Bad page free for domain %llu\n", p->domain); 2.92 rc = -EINVAL; 2.93 - goto out; 2.94 + break; 2.95 } 2.96 2.97 if ( test_and_clear_bit(_PGC_guest_pinned, &page->count_and_flags) ) 2.98 @@ -93,13 +84,6 @@ static long free_dom_mem(struct task_str 2.99 put_page(page); 2.100 } 2.101 2.102 - out: 2.103 - if ( need_flush ) 2.104 - { 2.105 - __flush_tlb(); 2.106 - perfc_incr(need_flush_tlb_flush); 2.107 - } 2.108 - 2.109 return rc ? rc : op.size; 2.110 } 2.111
3.1 --- a/xen/common/domain.c Thu May 13 09:59:09 2004 +0000 3.2 +++ b/xen/common/domain.c Thu May 13 15:09:41 2004 +0000 3.3 @@ -324,8 +324,7 @@ struct pfn_info *alloc_domain_page(struc 3.4 page->type_and_flags = 0; 3.5 if ( p != NULL ) 3.6 { 3.7 - if ( unlikely(in_irq()) ) 3.8 - BUG(); 3.9 + ASSERT(!in_irq()); 3.10 wmb(); /* Domain pointer must be visible before updating refcnt. */ 3.11 spin_lock(&p->page_list_lock); 3.12 if ( unlikely(p->tot_pages >= p->max_pages) ) 3.13 @@ -369,7 +368,7 @@ void free_domain_page(struct pfn_info *p 3.14 if ( !(page->count_and_flags & PGC_zombie) ) 3.15 { 3.16 page->tlbflush_timestamp = tlbflush_clock; 3.17 - if (p) 3.18 + if ( likely(p != NULL) ) 3.19 { 3.20 page->u.cpu_mask = 1 << p->processor; 3.21 spin_lock(&p->page_list_lock);
4.1 --- a/xen/common/memory.c Thu May 13 09:59:09 2004 +0000 4.2 +++ b/xen/common/memory.c Thu May 13 15:09:41 2004 +0000 4.3 @@ -1209,6 +1209,33 @@ int do_update_va_mapping(unsigned long p 4.4 return err; 4.5 } 4.6 4.7 +int do_update_va_mapping_otherdomain(unsigned long page_nr, 4.8 + unsigned long val, 4.9 + unsigned long flags, 4.10 + domid_t domid) 4.11 +{ 4.12 + unsigned int cpu = smp_processor_id(); 4.13 + struct task_struct *p; 4.14 + int rc; 4.15 + 4.16 + if ( unlikely(!IS_PRIV(current)) ) 4.17 + return -EPERM; 4.18 + 4.19 + percpu_info[cpu].gps = p = find_domain_by_id(domid); 4.20 + if ( unlikely(p == NULL) ) 4.21 + { 4.22 + MEM_LOG("Unknown domain '%llu'", domid); 4.23 + return -ESRCH; 4.24 + } 4.25 + 4.26 + rc = do_update_va_mapping(page_nr, val, flags); 4.27 + 4.28 + put_task_struct(p); 4.29 + percpu_info[cpu].gps = NULL; 4.30 + 4.31 + return rc; 4.32 +} 4.33 + 4.34 4.35 #ifndef NDEBUG 4.36 /*
5.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h Thu May 13 09:59:09 2004 +0000 5.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h Thu May 13 15:09:41 2004 +0000 5.3 @@ -39,6 +39,7 @@ 5.4 #define __HYPERVISOR_xen_version 22 5.5 #define __HYPERVISOR_console_io 23 5.6 #define __HYPERVISOR_physdev_op 24 5.7 +#define __HYPERVISOR_update_va_mapping_otherdomain 25 5.8 5.9 /* 5.10 * MULTICALLS
6.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c Thu May 13 09:59:09 2004 +0000 6.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/backend/main.c Thu May 13 15:09:41 2004 +0000 6.3 @@ -100,8 +100,8 @@ int netif_be_start_xmit(struct sk_buff * 6.4 netif_t *netif = (netif_t *)dev->priv; 6.5 s8 status = NETIF_RSP_OKAY; 6.6 u16 size=0, id; 6.7 - mmu_update_t mmu[6]; 6.8 - pgd_t *pgd; pmd_t *pmd; pte_t *pte; 6.9 + mmu_update_t mmu[4]; 6.10 + multicall_entry_t mcl[2]; 6.11 unsigned long vdata, mdata=0, new_mfn; 6.12 6.13 /* Drop the packet if the target domain has no receive buffers. */ 6.14 @@ -148,34 +148,37 @@ int netif_be_start_xmit(struct sk_buff * 6.15 6.16 new_mfn = get_new_mfn(); 6.17 6.18 - pgd = pgd_offset_k( (vdata & PAGE_MASK)); 6.19 - pmd = pmd_offset(pgd, (vdata & PAGE_MASK)); 6.20 - pte = pte_offset(pmd, (vdata & PAGE_MASK)); 6.21 + mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; 6.22 + mmu[0].val = __pa(vdata) >> PAGE_SHIFT; 6.23 6.24 - mmu[0].val = (unsigned long)(netif->domid<<16) & ~0xFFFFUL; 6.25 - mmu[0].ptr = (unsigned long)(netif->domid<< 0) & ~0xFFFFUL; 6.26 - mmu[1].val = (unsigned long)(netif->domid>>16) & ~0xFFFFUL; 6.27 - mmu[1].ptr = (unsigned long)(netif->domid>>32) & ~0xFFFFUL; 6.28 - mmu[0].ptr |= MMU_EXTENDED_COMMAND; 6.29 - mmu[0].val |= MMUEXT_SET_SUBJECTDOM_L; 6.30 + mmu[1].val = (unsigned long)(netif->domid<<16) & ~0xFFFFUL; 6.31 + mmu[1].ptr = (unsigned long)(netif->domid<< 0) & ~0xFFFFUL; 6.32 + mmu[2].val = (unsigned long)(netif->domid>>16) & ~0xFFFFUL; 6.33 + mmu[2].ptr = (unsigned long)(netif->domid>>32) & ~0xFFFFUL; 6.34 mmu[1].ptr |= MMU_EXTENDED_COMMAND; 6.35 - mmu[1].val |= MMUEXT_SET_SUBJECTDOM_H; 6.36 + mmu[1].val |= MMUEXT_SET_SUBJECTDOM_L; 6.37 + mmu[2].ptr |= MMU_EXTENDED_COMMAND; 6.38 + mmu[2].val |= MMUEXT_SET_SUBJECTDOM_H; 6.39 6.40 - mmu[2].ptr = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND; 6.41 - mmu[2].val = MMUEXT_REASSIGN_PAGE; 6.42 - 6.43 - mmu[3].ptr = 
MMU_EXTENDED_COMMAND; 6.44 - mmu[3].val = MMUEXT_RESET_SUBJECTDOM; 6.45 + mmu[3].ptr = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND; 6.46 + mmu[3].val = MMUEXT_REASSIGN_PAGE; 6.47 6.48 - mmu[4].ptr = virt_to_machine(pte); 6.49 - mmu[4].val = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL; 6.50 + mcl[0].op = __HYPERVISOR_mmu_update; 6.51 + mcl[0].args[0] = (unsigned long)mmu; 6.52 + mcl[0].args[1] = 4; 6.53 + mcl[1].op = __HYPERVISOR_update_va_mapping; 6.54 + mcl[1].args[0] = vdata >> PAGE_SHIFT; 6.55 + mcl[1].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL; 6.56 + mcl[1].args[2] = UVMF_INVLPG; 6.57 6.58 - mmu[5].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; 6.59 - mmu[5].val = __pa(vdata) >> PAGE_SHIFT; 6.60 - 6.61 - if ( unlikely(HYPERVISOR_mmu_update(mmu, 6) < 0) ) 6.62 + (void)HYPERVISOR_multicall(mcl, 2); 6.63 + if ( mcl[0].args[5] != 0 ) 6.64 { 6.65 DPRINTK("Failed MMU update transferring to DOM%llu\n", netif->domid); 6.66 + (void)HYPERVISOR_update_va_mapping( 6.67 + vdata >> PAGE_SHIFT, 6.68 + (pte_t) { (mdata & PAGE_MASK) | __PAGE_KERNEL }, 6.69 + UVMF_INVLPG); 6.70 dealloc_mfn(new_mfn); 6.71 status = NETIF_RSP_ERROR; 6.72 goto out; 6.73 @@ -183,6 +186,10 @@ int netif_be_start_xmit(struct sk_buff * 6.74 6.75 phys_to_machine_mapping[__pa(vdata) >> PAGE_SHIFT] = new_mfn; 6.76 6.77 + atomic_set(&(skb_shinfo(skb)->dataref), 1); 6.78 + skb_shinfo(skb)->nr_frags = 0; 6.79 + skb_shinfo(skb)->frag_list = NULL; 6.80 + 6.81 netif->stats.rx_bytes += size; 6.82 netif->stats.rx_packets++; 6.83 6.84 @@ -261,7 +268,6 @@ static void net_tx_action(unsigned long 6.85 netif_tx_request_t txreq; 6.86 u16 pending_idx; 6.87 NETIF_RING_IDX i; 6.88 - pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED); 6.89 struct page *page; 6.90 6.91 while ( (NR_PENDING_REQS < MAX_PENDING_REQS) && 6.92 @@ -334,10 +340,10 @@ static void net_tx_action(unsigned long 6.93 6.94 pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)]; 6.95 6.96 - if ( direct_remap_area_pages(&init_mm, 
6.97 - MMAP_VADDR(pending_idx), 6.98 - txreq.addr & PAGE_MASK, 6.99 - PAGE_SIZE, prot, netif->domid) != 0 ) 6.100 + if ( HYPERVISOR_update_va_mapping_otherdomain( 6.101 + MMAP_VADDR(pending_idx) >> PAGE_SHIFT, 6.102 + (pte_t) { (txreq.addr & PAGE_MASK) | __PAGE_KERNEL }, 6.103 + 0, netif->domid) != 0 ) 6.104 { 6.105 DPRINTK("Bad page frame\n"); 6.106 make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.107 @@ -352,7 +358,8 @@ static void net_tx_action(unsigned long 6.108 DPRINTK("Can't allocate a skb in start_xmit.\n"); 6.109 make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.110 netif_put(netif); 6.111 - vmfree_area_pages(MMAP_VADDR(pending_idx), PAGE_SIZE); 6.112 + HYPERVISOR_update_va_mapping(MMAP_VADDR(pending_idx) >> PAGE_SHIFT, 6.113 + (pte_t) { 0 }, UVMF_INVLPG); 6.114 break; 6.115 } 6.116 6.117 @@ -401,7 +408,8 @@ static void netif_page_release(struct pa 6.118 6.119 netif = pending_netif[pending_idx]; 6.120 6.121 - vmfree_area_pages(MMAP_VADDR(pending_idx), PAGE_SIZE); 6.122 + HYPERVISOR_update_va_mapping(MMAP_VADDR(pending_idx) >> PAGE_SHIFT, 6.123 + (pte_t) { 0 }, UVMF_INVLPG); 6.124 6.125 spin_lock(&netif->tx_lock); 6.126 make_tx_response(netif, pending_id[pending_idx], NETIF_RSP_OKAY);
7.1 --- a/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c Thu May 13 09:59:09 2004 +0000 7.2 +++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/netif/frontend/main.c Thu May 13 15:09:41 2004 +0000 7.3 @@ -171,16 +171,6 @@ static void network_tx_buf_gc(struct net 7.4 } 7.5 7.6 7.7 -static inline pte_t *get_ppte(void *addr) 7.8 -{ 7.9 - pgd_t *pgd; pmd_t *pmd; pte_t *pte; 7.10 - pgd = pgd_offset_k( (unsigned long)addr); 7.11 - pmd = pmd_offset(pgd, (unsigned long)addr); 7.12 - pte = pte_offset(pmd, (unsigned long)addr); 7.13 - return pte; 7.14 -} 7.15 - 7.16 - 7.17 static void network_alloc_rx_buffers(struct net_device *dev) 7.18 { 7.19 unsigned short id; 7.20 @@ -190,7 +180,6 @@ static void network_alloc_rx_buffers(str 7.21 dom_mem_op_t op; 7.22 unsigned long pfn_array[NETIF_RX_RING_SIZE]; 7.23 int ret, nr_pfns = 0; 7.24 - pte_t *pte; 7.25 7.26 /* Make sure the batch is large enough to be worthwhile (1/2 ring). */ 7.27 if ( unlikely((i - np->rx_resp_cons) > (NETIF_RX_RING_SIZE/2)) || 7.28 @@ -212,9 +201,9 @@ static void network_alloc_rx_buffers(str 7.29 7.30 np->rx->ring[MASK_NET_RX_IDX(i)].req.id = id; 7.31 7.32 - pte = get_ppte(skb->head); 7.33 - pfn_array[nr_pfns++] = pte->pte_low >> PAGE_SHIFT; 7.34 - queue_l1_entry_update(pte, 0); 7.35 + pfn_array[nr_pfns++] = virt_to_machine(skb->head) >> PAGE_SHIFT; 7.36 + HYPERVISOR_update_va_mapping((unsigned long)skb->head >> PAGE_SHIFT, 7.37 + (pte_t) { 0 }, UVMF_INVLPG); 7.38 } 7.39 while ( (++i - np->rx_resp_cons) != NETIF_RX_RING_SIZE ); 7.40 7.41 @@ -309,8 +298,7 @@ static void netif_int(int irq, void *dev 7.42 struct sk_buff *skb; 7.43 netif_rx_response_t *rx; 7.44 NETIF_RING_IDX i; 7.45 - mmu_update_t mmu[2]; 7.46 - pte_t *pte; 7.47 + mmu_update_t mmu; 7.48 7.49 spin_lock_irqsave(&np->tx_lock, flags); 7.50 network_tx_buf_gc(dev); 7.51 @@ -334,13 +322,14 @@ static void netif_int(int irq, void *dev 7.52 } 7.53 7.54 /* Remap the page. 
*/ 7.55 - pte = get_ppte(skb->head); 7.56 - mmu[0].ptr = virt_to_machine(pte); 7.57 - mmu[0].val = (rx->addr & PAGE_MASK) | __PAGE_KERNEL; 7.58 - mmu[1].ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE; 7.59 - mmu[1].val = __pa(skb->head) >> PAGE_SHIFT; 7.60 - if ( HYPERVISOR_mmu_update(mmu, 2) != 0 ) 7.61 + mmu.ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE; 7.62 + mmu.val = __pa(skb->head) >> PAGE_SHIFT; 7.63 + if ( HYPERVISOR_mmu_update(&mmu, 1) != 0 ) 7.64 BUG(); 7.65 + HYPERVISOR_update_va_mapping((unsigned long)skb->head >> PAGE_SHIFT, 7.66 + (pte_t) { (rx->addr & PAGE_MASK) | 7.67 + __PAGE_KERNEL }, 7.68 + 0); 7.69 phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 7.70 rx->addr >> PAGE_SHIFT; 7.71 7.72 @@ -352,9 +341,6 @@ static void netif_int(int irq, void *dev 7.73 atomic_set(&(skb_shinfo(skb)->dataref), 1); 7.74 skb_shinfo(skb)->nr_frags = 0; 7.75 skb_shinfo(skb)->frag_list = NULL; 7.76 - 7.77 - phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT] = 7.78 - (*(unsigned long *)get_ppte(skb->head)) >> PAGE_SHIFT; 7.79 7.80 skb->data = skb->tail = skb->head + (rx->addr & ~PAGE_MASK); 7.81 skb_put(skb, rx->status);
8.1 --- a/xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h Thu May 13 09:59:09 2004 +0000 8.2 +++ b/xenolinux-2.4.26-sparse/include/asm-xen/hypervisor.h Thu May 13 15:09:41 2004 +0000 8.3 @@ -459,4 +459,18 @@ static inline int HYPERVISOR_physdev_op( 8.4 return ret; 8.5 } 8.6 8.7 +static inline int HYPERVISOR_update_va_mapping_otherdomain( 8.8 + unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid) 8.9 +{ 8.10 + int ret; 8.11 + __asm__ __volatile__ ( 8.12 + TRAP_INSTR 8.13 + : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping_otherdomain), 8.14 + "b" (page_nr), "c" ((new_val).pte_low), "d" (flags), 8.15 + "S" ((unsigned long)domid), "D" ((unsigned long)(domid>>32)) : 8.16 + "memory" ); 8.17 + 8.18 + return ret; 8.19 +} 8.20 + 8.21 #endif /* __HYPERVISOR_H__ */