direct-io.hg
changeset 2292:c8a29df316b8
bitkeeper revision 1.1159.42.2 (4124b039yQGTqk4tjyHEoXGty5VlPg)
Fix use of the phys_to_machine_mapping table in Linux 2.4 and 2.6.
We now ensure that the table contains no MFNs that do not belong to
the OS: invalid entries contain a sentinel value, and deliberate
foreign mappings have the high bit set. This means that pte_page() and
pte_pfn() will do the right thing despite possible aliasing in the
M2P table.
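The round-trip check that pte_page()/pte_pfn() now rely on is easiest to see in isolation. The following user-space sketch is illustrative only and is not part of the changeset: the P2M and M2P tables are stubbed with tiny arrays, and check_pfn() mimics the test applied by the reworked pte_pfn() macro.

/*
 * Hypothetical user-space sketch (not part of the changeset) of the
 * p2m/m2p round-trip check that pte_pfn()/pte_page() rely on after this
 * change.  The P2M and M2P tables are stubbed with small arrays; the two
 * macros mirror the definitions added to pgtable-2level.h.
 */
#include <stdio.h>

#define MAX_MAPNR          8UL
#define INVALID_P2M_ENTRY  (~0UL)
#define FOREIGN_FRAME(_m)  ((_m) | (1UL << ((sizeof(unsigned long)*8)-1)))

static unsigned long p2m[MAX_MAPNR]; /* stand-in for phys_to_machine_mapping */
static unsigned long m2p[MAX_MAPNR]; /* stand-in for machine_to_phys_mapping */

/* Mimics the new pte_pfn(): accept the PFN only if the P2M maps it back. */
static unsigned long check_pfn(unsigned long mfn)
{
    unsigned long pfn = (mfn < MAX_MAPNR) ? m2p[mfn] : MAX_MAPNR;

    if ( (pfn >= MAX_MAPNR) || (p2m[pfn] != mfn) )
        pfn = MAX_MAPNR;              /* special: force !pfn_valid() */
    return pfn;
}

int main(void)
{
    p2m[3] = 7;  m2p[7] = 3;                 /* our page: round-trips cleanly */
    p2m[4] = FOREIGN_FRAME(5);  m2p[5] = 4;  /* deliberate foreign mapping    */
    p2m[6] = INVALID_P2M_ENTRY;  m2p[2] = 6; /* ballooned-out page, stale M2P */

    printf("mfn 7 -> pfn %lu (ours, valid)\n", check_pfn(7));
    printf("mfn 5 -> pfn %lu (foreign, forced invalid)\n", check_pfn(5));
    printf("mfn 2 -> pfn %lu (not ours, forced invalid)\n", check_pfn(2));
    return 0;
}

This prints pfn 3 for the local page and the out-of-range value 8 for the other two, which is exactly the "force !pfn_valid()" behaviour the macros need.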
line diff
--- a/linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c  Thu Aug 19 12:10:55 2004 +0000
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c  Thu Aug 19 13:50:49 2004 +0000
@@ -36,13 +36,16 @@ typedef struct user_balloon_op {
 } user_balloon_op_t;
 /* END OF USER DEFINE */
 
-/* Dead entry written into balloon-owned entries in the PMT. */
-#define DEAD 0xdeadbeef
-
 static struct proc_dir_entry *balloon_pde;
 unsigned long credit;
 static unsigned long current_pages, most_seen_pages;
 
+/*
+ * Dead entry written into balloon-owned entries in the PMT.
+ * It is deliberately different to INVALID_P2M_ENTRY.
+ */
+#define DEAD 0xdead1234
+
 static inline pte_t *get_ptep(unsigned long addr)
 {
     pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
@@ -79,17 +82,16 @@ static unsigned long inflate_balloon(uns
     for ( i = 0; i < num_pages; i++, currp++ )
     {
         struct page *page = alloc_page(GFP_HIGHUSER);
-        unsigned long pfn = page - mem_map;
+        unsigned long pfn = page - mem_map;
 
         /* If allocation fails then free all reserved pages. */
-        if ( page == 0 )
+        if ( page == NULL )
         {
-            printk(KERN_ERR "Unable to inflate balloon by %ld, only %ld pages free.",
-                   num_pages, i);
+            printk(KERN_ERR "Unable to inflate balloon by %ld, only"
+                   " %ld pages free.", num_pages, i);
             currp = parray;
-            for(j = 0; j < i; j++, ++currp){
+            for ( j = 0; j < i; j++, currp++ )
                 __free_page((struct page *) (mem_map + *currp));
-            }
             ret = -EFAULT;
             goto cleanup;
         }
@@ -102,9 +104,8 @@ static unsigned long inflate_balloon(uns
     {
         unsigned long mfn = phys_to_machine_mapping[*currp];
         curraddr = (unsigned long)page_address(mem_map + *currp);
-        if (curraddr)
+        if ( curraddr != 0 )
             queue_l1_entry_update(get_ptep(curraddr), 0);
-
         phys_to_machine_mapping[*currp] = DEAD;
         *currp = mfn;
     }
@@ -313,17 +314,18 @@ claim_new_pages(unsigned long num_pages)
     XEN_flush_page_update_queue();
     new_page_cnt = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
                                          parray, num_pages, 0);
-    if (new_page_cnt != num_pages)
+    if ( new_page_cnt != num_pages )
     {
         printk(KERN_WARNING
                "claim_new_pages: xen granted only %lu of %lu requested pages\n",
                new_page_cnt, num_pages);
 
-        /* XXX
-         * avoid xen lockup when user forgot to setdomainmaxmem. xen
-         * usually can dribble out a few pages and then hangs
+        /*
+         * Avoid xen lockup when user forgot to setdomainmaxmem. Xen
+         * usually can dribble out a few pages and then hangs.
          */
-        if (new_page_cnt < 1000) {
+        if ( new_page_cnt < 1000 )
+        {
             printk(KERN_WARNING "Remember to use setdomainmaxmem\n");
             HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
                 parray, new_page_cnt, 0);
@@ -331,7 +333,7 @@ claim_new_pages(unsigned long num_pages)
         }
     }
     memcpy(phys_to_machine_mapping+most_seen_pages, parray,
-           new_page_cnt * sizeof(unsigned long));
+           new_page_cnt * sizeof(unsigned long));
 
     pagetable_extend(most_seen_pages,new_page_cnt);
 
@@ -465,12 +467,15 @@ static int __init init_module(void)
     /*
      * make a new phys map if mem= says xen can give us memory to grow
      */
-    if (max_pfn > start_info.nr_pages) {
+    if ( max_pfn > start_info.nr_pages )
+    {
         extern unsigned long *phys_to_machine_mapping;
         unsigned long *newmap;
         newmap = (unsigned long *)vmalloc(max_pfn * sizeof(unsigned long));
-        phys_to_machine_mapping = memcpy(newmap, phys_to_machine_mapping,
-                                         start_info.nr_pages * sizeof(unsigned long));
+        memset(newmap, ~0, max_pfn * sizeof(unsigned long));
+        memcpy(newmap, phys_to_machine_mapping,
+               start_info.nr_pages * sizeof(unsigned long));
+        phys_to_machine_mapping = newmap;
     }
 
     return 0;
--- a/linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h  Thu Aug 19 12:10:55 2004 +0000
+++ b/linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h  Thu Aug 19 13:50:49 2004 +0000
@@ -58,7 +58,19 @@ static inline pmd_t * pmd_offset(pgd_t *
  * then we'll have p2m(m2p(MFN))==MFN.
  * If we detect a special mapping then it doesn't have a 'struct page'.
  * We force !VALID_PAGE() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the high bit gets shifted out
+ * (e.g., phys_to_machine()) so behaviour there is correct.
  */
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
 #define pte_page(_pte) \
 ({ \
     unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
--- a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/pci-dma.c  Thu Aug 19 12:10:55 2004 +0000
+++ b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/pci-dma.c  Thu Aug 19 13:50:49 2004 +0000
@@ -61,6 +61,8 @@ void *dma_alloc_coherent(struct device *
         pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
         pfn = pte->pte_low >> PAGE_SHIFT;
         queue_l1_entry_update(pte, 0);
+        phys_to_machine_mapping[(__pa(ret)>>PAGE_SHIFT)+i] =
+            INVALID_P2M_ENTRY;
         flush_page_update_queue();
         if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
             &pfn, 1, 0) != 1) BUG();
@@ -79,7 +81,6 @@ void *dma_alloc_coherent(struct device *
             pfn+i, (__pa(ret)>>PAGE_SHIFT)+i);
         phys_to_machine_mapping[(__pa(ret)>>PAGE_SHIFT)+i] =
             pfn+i;
-        flush_page_update_queue();
     }
     flush_page_update_queue();
 }
--- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c  Thu Aug 19 12:10:55 2004 +0000
+++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c  Thu Aug 19 13:50:49 2004 +0000
@@ -299,7 +299,7 @@ unsigned long allocate_empty_lowmem_regi
         pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
         pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
         queue_l1_entry_update(pte, 0);
-        phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = 0xdeadbeef;
+        phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
     }
 
     flush_page_update_queue();
--- a/linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c  Thu Aug 19 12:10:55 2004 +0000
+++ b/linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c  Thu Aug 19 13:50:49 2004 +0000
@@ -415,7 +415,7 @@ static void dispatch_rw_block_io(blkif_t
         mcl[i].args[3] = blkif->domid;
 
         phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
-            phys_seg[i].buffer >> PAGE_SHIFT;
+            FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
     }
 
     if ( unlikely(HYPERVISOR_multicall(mcl, nr_psegs) != 0) )
--- a/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c  Thu Aug 19 12:10:55 2004 +0000
+++ b/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c  Thu Aug 19 13:50:49 2004 +0000
@@ -1,5 +1,5 @@
 /******************************************************************************
- * block.c
+ * blkfront.c
  *
  * XenLinux virtual block-device driver.
  *
@@ -67,11 +67,12 @@ static inline int GET_ID_FROM_FREELIST(
 {
     unsigned long free = rec_ring_free;
 
-    if(free>BLKIF_RING_SIZE) BUG();
+    if ( free > BLKIF_RING_SIZE )
+        BUG();
 
     rec_ring_free = rec_ring[free].id;
 
-    rec_ring[free].id = 0x0fffffee; // debug
+    rec_ring[free].id = 0x0fffffee; /* debug */
 
     return free;
 }
@@ -253,8 +254,6 @@ static int blkif_queue_request(struct re
     id = GET_ID_FROM_FREELIST();
     rec_ring[id].id = (unsigned long) req;
 
-//printk(KERN_ALERT"r: %d req %p (%ld)\n",req_prod,req,id);
-
     ring_req->id = id;
     ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
         BLKIF_OP_READ;
@@ -300,8 +299,6 @@ void do_blkif_request(request_queue_t *r
 
     DPRINTK("Entered do_blkif_request\n");
 
-//printk(KERN_ALERT"r: %d req\n",req_prod);
-
     queued = 0;
 
     while ((req = elv_next_request(rq)) != NULL) {
@@ -310,7 +307,8 @@ void do_blkif_request(request_queue_t *r
             continue;
         }
 
-        if (BLKIF_RING_FULL) {
+        if ( BLKIF_RING_FULL )
+        {
             blk_stop_queue(rq);
             break;
         }
@@ -358,11 +356,9 @@ static irqreturn_t blkif_int(int irq, vo
         id = bret->id;
         req = (struct request *)rec_ring[id].id;
 
-//printk(KERN_ALERT"i: %d req %p (%ld)\n",i,req,id);
-
         blkif_completion( &rec_ring[id] );
 
-        ADD_ID_TO_FREELIST(id); // overwrites req
+        ADD_ID_TO_FREELIST(id); /* overwrites req */
 
         switch ( bret->operation )
         {
@@ -772,8 +768,6 @@ static int blkif_queue_request(unsigned
     req->nr_segments = 1;
     req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
 
-//printk("N: %d req %p (%ld)\n",req_prod,rec_ring[xid].id,xid);
-
     req_prod++;
 
     /* Keep a private copy so we can reissue requests when recovering. */
@@ -892,8 +886,6 @@ static void blkif_int(int irq, void *dev
         id = bret->id;
         bh = (struct buffer_head *)rec_ring[id].id;
 
-//printk("i: %d req %p (%ld)\n",i,bh,id);
-
         blkif_completion( &rec_ring[id] );
 
         ADD_ID_TO_FREELIST(id);
@@ -942,16 +934,11 @@ static inline void translate_req_to_pfn(
     xreq->operation = req->operation;
     xreq->nr_segments = req->nr_segments;
     xreq->device = req->device;
-    // preserve id
+    /* preserve id */
     xreq->sector_number = req->sector_number;
 
     for ( i = 0; i < req->nr_segments; i++ )
-    {
-        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
-            (machine_to_phys_mapping[req->frame_and_sects[i] >> PAGE_SHIFT] <<
-             PAGE_SHIFT);
-    }
-
+        xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
 }
 
 static inline void translate_req_to_mfn(blkif_request_t *xreq,
@@ -962,15 +949,11 @@ static inline void translate_req_to_mfn(
     xreq->operation = req->operation;
     xreq->nr_segments = req->nr_segments;
     xreq->device = req->device;
-    xreq->id = req->id; // copy id (unlike above)
+    xreq->id = req->id; /* copy id (unlike above) */
     xreq->sector_number = req->sector_number;
 
     for ( i = 0; i < req->nr_segments; i++ )
-    {
-        xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
-            (phys_to_machine_mapping[req->frame_and_sects[i] >> PAGE_SHIFT] <<
-             PAGE_SHIFT);
-    }
+        xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
 }
 
 
@@ -978,7 +961,6 @@ static inline void translate_req_to_mfn(
 static inline void flush_requests(void)
 {
     DISABLE_SCATTERGATHER();
-//printk(KERN_ALERT"flush %d\n",req_prod);
     wmb(); /* Ensure that the frontend can see the requests. */
     blk_ring->req_prod = req_prod;
     notify_via_evtchn(blkif_evtchn);
@@ -1010,8 +992,6 @@ void blkif_control_send(blkif_request_t
     blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req.id = id;
     rec_ring[id].id = (unsigned long) req;
 
-//printk("c: %d req %p (%ld)\n",req_prod,req,id);
-
     translate_req_to_pfn( &rec_ring[id], req );
 
     req_prod++;
@@ -1094,13 +1074,13 @@ static void blkif_status_change(blkif_fe
                    " in state %d\n", blkif_state);
             break;
         }
+
         blkif_evtchn = status->evtchn;
-        blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
-        if ( (rc=request_irq(blkif_irq, blkif_int,
-                             SA_SAMPLE_RANDOM, "blkif", NULL)) )
-        {
+        blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
+
+        if ( (rc = request_irq(blkif_irq, blkif_int,
+                               SA_SAMPLE_RANDOM, "blkif", NULL)) )
             printk(KERN_ALERT"blkfront request_irq failed (%ld)\n",rc);
-        }
 
         if ( recovery )
         {
@@ -1109,31 +1089,28 @@ static void blkif_status_change(blkif_fe
             /* Hmm, requests might be re-ordered when we re-issue them.
                This will need to be fixed once we have barriers */
 
-            // req_prod = 0; : already is zero
-
-            // stage 1 : find active and move to safety
-            for ( i=0; i <BLKIF_RING_SIZE; i++ )
+            /* Stage 1 : Find active and move to safety. */
+            for ( i = 0; i < BLKIF_RING_SIZE; i++ )
             {
                 if ( rec_ring[i].id >= PAGE_OFFSET )
                 {
                     translate_req_to_mfn(
-                        &blk_ring->ring[req_prod].req, &rec_ring[i] );
-
+                        &blk_ring->ring[req_prod].req, &rec_ring[i]);
                     req_prod++;
                 }
            }
 
-printk(KERN_ALERT"blkfront: recovered %d descriptors\n",req_prod);
+            printk(KERN_ALERT"blkfront: recovered %d descriptors\n",req_prod);
 
-            // stage 2 : set up shadow list
-            for ( i=0; i<req_prod; i++ )
+            /* Stage 2 : Set up shadow list. */
+            for ( i = 0; i < req_prod; i++ )
            {
                 rec_ring[i].id = blk_ring->ring[i].req.id;
                 blk_ring->ring[i].req.id = i;
-                translate_req_to_pfn( &rec_ring[i], &blk_ring->ring[i].req );
+                translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
            }
 
-            // stage 3 : set up free list
+            /* Stage 3 : Set up free list. */
             for ( ; i < BLKIF_RING_SIZE; i++ )
                 rec_ring[i].id = i+1;
             rec_ring_free = req_prod;
@@ -1150,9 +1127,6 @@ printk(KERN_ALERT"blkfront: recovered %d
 
             /* Kicks things back into life. */
             flush_requests();
-
-
-
         }
         else
         {
@@ -1270,7 +1244,7 @@ void blkdev_resume(void)
 
 /* XXXXX THIS IS A TEMPORARY FUNCTION UNTIL WE GET GRANT TABLES */
 
-void blkif_completion( blkif_request_t *req )
+void blkif_completion(blkif_request_t *req)
 {
     int i;
 
@@ -1281,10 +1255,8 @@ void blkif_completion( blkif_request_t *
         {
             unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
             unsigned long mfn = phys_to_machine_mapping[pfn];
-
             queue_machphys_update(mfn, pfn);
         }
-
         break;
     }
--- a/linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c  Thu Aug 19 12:10:55 2004 +0000
+++ b/linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c  Thu Aug 19 13:50:49 2004 +0000
@@ -204,6 +204,12 @@ static void net_rx_action(unsigned long
         mdata = virt_to_machine(vdata);
         new_mfn = get_new_mfn();
 
+        /*
+         * Set the new P2M table entry before reassigning the old data page.
+         * Heed the comment in pgtable-2level.h:pte_page(). :-)
+         */
+        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+
         mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
         mmu[0].val = __pa(vdata) >> PAGE_SHIFT;
         mmu[1].ptr = MMU_EXTENDED_COMMAND;
@@ -250,8 +256,6 @@ static void net_rx_action(unsigned long
         mdata = ((mmu[2].ptr & PAGE_MASK) |
                  ((unsigned long)skb->data & ~PAGE_MASK));
 
-        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
-
         atomic_set(&(skb_shinfo(skb)->dataref), 1);
         skb_shinfo(skb)->nr_frags = 0;
         skb_shinfo(skb)->frag_list = NULL;
@@ -556,7 +560,7 @@ static void net_tx_action(unsigned long
         }
 
         phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
-            txreq.addr >> PAGE_SHIFT;
+            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
 
         __skb_put(skb, PKT_PROT_LEN);
         memcpy(skb->data,
--- a/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c  Thu Aug 19 12:10:55 2004 +0000
+++ b/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c  Thu Aug 19 13:50:49 2004 +0000
@@ -255,9 +255,9 @@ static void network_alloc_rx_buffers(str
 
         rx_pfn_array[nr_pfns] = virt_to_machine(skb->head) >> PAGE_SHIFT;
 
-        /* remove this page from pseudo phys map (migration optimization) */
+        /* Remove this page from pseudo phys map before passing back to Xen. */
         phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT]
-            = 0x80000001;
+            = INVALID_P2M_ENTRY;
 
         rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
         rx_mcl[nr_pfns].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
@@ -470,15 +470,6 @@ static int netif_poll(struct net_device
         mcl->args[2] = 0;
         mcl++;
         (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
-
-#if 0
-        if (unlikely(rx_mcl[0].args[5] != 0))
-            printk(KERN_ALERT"Hypercall0 failed %u\n",np->rx->resp_prod);
-
-        if (unlikely(rx_mcl[1].args[5] != 0))
-            printk(KERN_ALERT"Hypercall1 failed %u\n",np->rx->resp_prod);
-#endif
-
     }
 
     while ( (skb = __skb_dequeue(&rxq)) != NULL )
--- a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h  Thu Aug 19 12:10:55 2004 +0000
+++ b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h  Thu Aug 19 13:50:49 2004 +0000
@@ -88,30 +88,33 @@ static inline pte_t ptep_get_and_clear(p
  * not have MFN in our p2m table. Conversely, if the page is ours,
  * then we'll have p2m(m2p(MFN))==MFN.
  * If we detect a special mapping then it doesn't have a 'struct page'.
- * We force !VALID_PAGE() by returning an out-of-range pointer.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the high bit gets shifted out
+ * (e.g., phys_to_machine()) so behaviour there is correct.
  */
-#define pte_page(_pte) \
-({ \
-    unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
-    unsigned long pfn = mfn_to_pfn(mfn); \
-    if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) ) \
-        pfn = max_mapnr; /* special: force !VALID_PAGE() */ \
-    pfn_to_page(pfn); \
-})
-
-#define pte_none(x) (!(x).pte_low)
-/* See comments above pte_page */
-/* XXXcl check pte_present because msync.c:filemap_sync_pte calls
- * without pte_present check */
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
 #define pte_pfn(_pte) \
 ({ \
     unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
-    unsigned long pfn = pte_present(_pte) ? mfn_to_pfn(mfn) : mfn; \
+    unsigned long pfn = mfn_to_pfn(mfn); \
     if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) ) \
         pfn = max_mapnr; /* special: force !pfn_valid() */ \
     pfn; \
 })
 
+#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
+
+#define pte_none(x) (!(x).pte_low)
+
 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
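Illustrative footnote, not part of the changeset: the NB2 comments above depend on the FOREIGN_FRAME marker living in the top bit of a 32-bit unsigned long, so that a phys_to_machine()-style translation, which shifts the frame number up by PAGE_SHIFT, silently discards it. A standalone sketch with the 32-bit width made explicit via uint32_t:

/*
 * Standalone sketch (not part of the changeset) of why FOREIGN_FRAME()
 * does not disturb address translation on i386: the marker occupies
 * bit 31, and forming a machine address shifts the frame number left
 * by PAGE_SHIFT, pushing the marker out of a 32-bit word.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
/* 32-bit rendering of the FOREIGN_FRAME() macro added above. */
#define FOREIGN_FRAME32(m) ((uint32_t)(m) | ((uint32_t)1 << 31))

int main(void)
{
    uint32_t mfn   = 0x12345;              /* arbitrary foreign MFN        */
    uint32_t entry = FOREIGN_FRAME32(mfn); /* what the p2m table now holds */

    /* The marked entry breaks the p2m/m2p round trip, so pte_pfn() forces
     * the page !pfn_valid() ... */
    uint32_t maddr_marked = entry << PAGE_SHIFT; /* marker bit shifted out */
    uint32_t maddr_plain  = mfn   << PAGE_SHIFT;

    /* ... but both translations yield the same machine frame base. */
    printf("p2m entry         = %#x\n", (unsigned)entry);
    printf("from marked entry = %#x\n", (unsigned)maddr_marked);
    printf("from plain mfn    = %#x\n", (unsigned)maddr_plain);
    return 0;
}

Both translated values come out as 0x12345000, so a marked entry still produces the correct machine address wherever the frame number is shifted up into an address.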