ia64/xen-unstable
changeset 8254:06b80b837c92
Merged.
author      emellor@leeni.uk.xensource.com
date        Tue Dec 06 16:40:50 2005 +0000 (2005-12-06)
parents     95584b819b72 198828cc103b
children    db500b8cb79a 2b8efe11096b
files       extras/mini-os/events.c
            extras/mini-os/hypervisor.c
            extras/mini-os/include/os.h
            extras/mini-os/time.c
            extras/mini-os/x86_32.S
            linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
            tools/python/xen/xend/server/tpmif.py
            xen/arch/x86/setup.c
            xen/arch/x86/shadow32.c
            xen/arch/x86/x86_32/domain_page.c
            xen/include/xen/domain_page.h
--- a/extras/mini-os/events.c	Tue Dec 06 16:40:43 2005 +0000
+++ b/extras/mini-os/events.c	Tue Dec 06 16:40:50 2005 +0000
@@ -77,6 +77,7 @@ int bind_virq( u32 virq, void (*handler)
 	/* Try to bind the virq to a port */
 	op.cmd = EVTCHNOP_bind_virq;
 	op.u.bind_virq.virq = virq;
+	op.u.bind_virq.vcpu = smp_processor_id();
 
 	if ( HYPERVISOR_event_channel_op(&op) != 0 )
 	{
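The new vcpu field makes explicit which virtual CPU the VIRQ is delivered to. A minimal sketch of the resulting bind sequence, assuming the Xen 3.0-era evtchn_op_t layout this tree uses (fragment; error handling trimmed, VIRQ_TIMER is illustrative):

    /* Bind a VIRQ to an event-channel port on the calling VCPU. */
    evtchn_op_t op;
    int port;

    op.cmd = EVTCHNOP_bind_virq;
    op.u.bind_virq.virq = VIRQ_TIMER;          /* illustrative VIRQ */
    op.u.bind_virq.vcpu = smp_processor_id();  /* the field this patch sets */

    if ( HYPERVISOR_event_channel_op(&op) != 0 )
        return 1;                              /* bind failed */
    port = op.u.bind_virq.port;                /* port chosen by Xen */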
--- a/extras/mini-os/hypervisor.c	Tue Dec 06 16:40:43 2005 +0000
+++ b/extras/mini-os/hypervisor.c	Tue Dec 06 16:40:50 2005 +0000
@@ -39,7 +39,7 @@ void do_hypervisor_callback(struct pt_re
     unsigned int l1i, l2i, port;
     int cpu = 0;
     shared_info_t *s = HYPERVISOR_shared_info;
-    vcpu_info_t *vcpu_info = &s->vcpu_data[cpu];
+    vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
 
     vcpu_info->evtchn_upcall_pending = 0;
 
@@ -71,7 +71,7 @@ inline void mask_evtchn(u32 port)
 inline void unmask_evtchn(u32 port)
 {
     shared_info_t *s = HYPERVISOR_shared_info;
-    vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
+    vcpu_info_t *vcpu_info = &s->vcpu_info[smp_processor_id()];
 
     synch_clear_bit(port, &s->evtchn_mask[0]);
 
--- a/extras/mini-os/include/os.h	Tue Dec 06 16:40:43 2005 +0000
+++ b/extras/mini-os/include/os.h	Tue Dec 06 16:40:50 2005 +0000
@@ -70,7 +70,7 @@ void trap_init(void);
 #define __cli()                                                         \
 do {                                                                    \
     vcpu_info_t *_vcpu;                                                 \
-    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
+    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
     _vcpu->evtchn_upcall_mask = 1;                                      \
     barrier();                                                          \
 } while (0)
@@ -79,7 +79,7 @@ do { \
 do {                                                                    \
     vcpu_info_t *_vcpu;                                                 \
     barrier();                                                          \
-    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
+    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
     _vcpu->evtchn_upcall_mask = 0;                                      \
     barrier(); /* unmask then check (avoid races) */                    \
     if ( unlikely(_vcpu->evtchn_upcall_pending) )                       \
@@ -89,7 +89,7 @@ do { \
 #define __save_flags(x)                                                 \
 do {                                                                    \
     vcpu_info_t *_vcpu;                                                 \
-    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
+    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
     (x) = _vcpu->evtchn_upcall_mask;                                    \
 } while (0)
 
@@ -97,7 +97,7 @@ do { \
 do {                                                                    \
     vcpu_info_t *_vcpu;                                                 \
     barrier();                                                          \
-    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
+    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
     if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                       \
         barrier(); /* unmask then check (avoid races) */                \
         if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
@@ -110,7 +110,7 @@ do { \
 #define __save_and_cli(x)                                               \
 do {                                                                    \
     vcpu_info_t *_vcpu;                                                 \
-    _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];     \
+    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];     \
     (x) = _vcpu->evtchn_upcall_mask;                                    \
     _vcpu->evtchn_upcall_mask = 1;                                      \
     barrier();                                                          \
@@ -123,7 +123,7 @@ do { \
 #define local_irq_enable() __sti()
 
 #define irqs_disabled()                                                 \
-    HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
+    HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].evtchn_upcall_mask
 
 /* This is a barrier for the compiler only, NOT the processor! */
 #define barrier() __asm__ __volatile__("": : :"memory")
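These macros emulate the x86 interrupt flag in software via evtchn_upcall_mask; the rename is the only change here, but the unmask-then-check idiom in the __sti() hunk is the part worth spelling out. A fragment-style sketch using only names the macros already reference (force_evtchn_callback() is the pending-path helper this header relies on):

    /* Events that arrive while evtchn_upcall_mask is set raise no upcall,
     * so after unmasking we must inspect evtchn_upcall_pending ourselves
     * and replay the callback by hand. */
    vcpu_info_t *_vcpu =
        &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];

    _vcpu->evtchn_upcall_mask = 0;  /* "sti": allow upcalls again */
    barrier();                      /* unmask strictly before the check */
    if ( _vcpu->evtchn_upcall_pending )
        force_evtchn_callback();    /* deliver the missed upcall */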
--- a/extras/mini-os/time.c	Tue Dec 06 16:40:43 2005 +0000
+++ b/extras/mini-os/time.c	Tue Dec 06 16:40:50 2005 +0000
@@ -73,7 +73,7 @@ static struct shadow_time_info shadow;
 
 static inline int time_values_up_to_date(void)
 {
-    struct vcpu_time_info *src = &HYPERVISOR_shared_info->vcpu_time[0];
+    struct vcpu_time_info *src = &HYPERVISOR_shared_info->vcpu_info[0].time;
 
     return (shadow.version == src->version);
 }
@@ -127,7 +127,7 @@ static unsigned long get_nsec_offset(voi
 
 static void get_time_values_from_xen(void)
 {
-    struct vcpu_time_info *src = &HYPERVISOR_shared_info->vcpu_time[0];
+    struct vcpu_time_info *src = &HYPERVISOR_shared_info->vcpu_info[0].time;
 
     do {
         shadow.version = src->version;
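The three mini-os files above track one interface change: the per-VCPU members of shared_info_t were consolidated, so vcpu_data[n] becomes vcpu_info[n] and the separate vcpu_time[n] array becomes the embedded vcpu_info[n].time. An abridged sketch of the consolidated layout, trimmed to the members these hunks actually touch (the full 3.0 structure has more fields):

    /* Abridged sketch; field set limited to what the diffs reference. */
    typedef struct vcpu_info {
        u8 evtchn_upcall_pending;    /* event waiting for delivery? */
        u8 evtchn_upcall_mask;       /* software interrupt-flag emulation */
        /* ... pending-event selector, arch-specific state ... */
        struct vcpu_time_info time;  /* was shared_info->vcpu_time[n] */
    } vcpu_info_t;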
--- a/extras/mini-os/x86_32.S	Tue Dec 06 16:40:43 2005 +0000
+++ b/extras/mini-os/x86_32.S	Tue Dec 06 16:40:50 2005 +0000
@@ -3,7 +3,11 @@
 
 
 .section __xen_guest
-    .asciz "XEN_VER=3.0,LOADER=generic,PT_MODE_WRITABLE"
+    .ascii "GUEST_OS=Mini-OS"
+    .ascii ",XEN_VER=xen-3.0"
+    .ascii ",LOADER=generic"
+    .ascii ",PT_MODE_WRITABLE"
+    .byte 0
 .text
 
 .globl _start, shared_info
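The switch from .asciz to .ascii fragments is deliberate: .asciz appends a NUL terminator after every string, so splitting the guest-info string across several .asciz directives would embed terminators mid-string. .ascii emits no terminator, letting the fragments concatenate, and the single trailing .byte 0 ends the combined string.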
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Tue Dec 06 16:40:43 2005 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Tue Dec 06 16:40:50 2005 +0000
@@ -76,9 +76,6 @@
 	skb_shinfo(_skb)->frag_list = NULL;	\
 } while (0)
 
-/* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
-#define RX_HEADROOM 200
-
 static unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
 static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
@@ -153,14 +150,15 @@ static char *be_state_name[] = {
 #endif
 
 #ifdef DEBUG
-#define DPRINTK(fmt, args...) \
-	printk(KERN_ALERT "netfront (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
+#define DPRINTK(fmt, args...) \
+	printk(KERN_ALERT "netfront (%s:%d) " fmt, __FUNCTION__, \
+	       __LINE__, ##args)
 #else
 #define DPRINTK(fmt, args...) ((void)0)
 #endif
-#define IPRINTK(fmt, args...) \
+#define IPRINTK(fmt, args...) \
 	printk(KERN_INFO "netfront: " fmt, ##args)
-#define WPRINTK(fmt, args...) \
+#define WPRINTK(fmt, args...) \
 	printk(KERN_WARNING "netfront: " fmt, ##args)
 
 
@@ -537,7 +535,13 @@ static void network_alloc_rx_buffers(str
 	 */
 	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
 	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
-		skb = alloc_xen_skb(dev->mtu + RX_HEADROOM);
+		/*
+		 * Subtract dev_alloc_skb headroom (16 bytes) and shared info
+		 * tailroom then round down to SKB_DATA_ALIGN boundary.
+		 */
+		skb = alloc_xen_skb(
+			(PAGE_SIZE - 16 - sizeof(struct skb_shared_info)) &
+			(-SKB_DATA_ALIGN(1)));
 		if (skb == NULL)
 			break;
 		__skb_queue_tail(&np->rx_batch, skb);
@@ -567,7 +571,8 @@ static void network_alloc_rx_buffers(str
 		rx_pfn_array[i] = virt_to_mfn(skb->head);
 
 		/* Remove this page from map before passing back to Xen. */
-		set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+		set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
+				    INVALID_P2M_ENTRY);
 
 		MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
 					__pte(0), 0);
@@ -809,36 +814,43 @@ static int netif_poll(struct net_device
 	}
 
 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
+		if (skb->len > (dev->mtu + ETH_HLEN)) {
+			if (net_ratelimit())
+				printk(KERN_INFO "Received packet too big for "
+				       "MTU (%d > %d)\n",
+				       skb->len - ETH_HLEN, dev->mtu);
+			skb->len = 0;
+			skb->tail = skb->data;
+			init_skb_shinfo(skb);
+			dev_kfree_skb(skb);
+			continue;
+		}
+
 		/*
 		 * Enough room in skbuff for the data we were passed? Also,
 		 * Linux expects at least 16 bytes headroom in each rx buffer.
 		 */
 		if (unlikely(skb->tail > skb->end) ||
 		    unlikely((skb->data - skb->head) < 16)) {
-			nskb = NULL;
-
-			/* Only copy the packet if it fits in the MTU. */
-			if (skb->len <= (dev->mtu + ETH_HLEN)) {
-				if ((skb->tail > skb->end) && net_ratelimit())
+			if (net_ratelimit()) {
+				if (skb->tail > skb->end)
 					printk(KERN_INFO "Received packet "
-					       "needs %zd bytes more "
-					       "headroom.\n",
+					       "is %zd bytes beyond tail.\n",
					       skb->tail - skb->end);
+				else
+					printk(KERN_INFO "Received packet "
+					       "is %zd bytes before head.\n",
+					       16 - (skb->data - skb->head));
+			}
 
-				nskb = alloc_xen_skb(skb->len + 2);
-				if (nskb != NULL) {
-					skb_reserve(nskb, 2);
-					skb_put(nskb, skb->len);
-					memcpy(nskb->data,
-					       skb->data,
-					       skb->len);
-					nskb->dev = skb->dev;
-				}
+			nskb = alloc_xen_skb(skb->len + 2);
+			if (nskb != NULL) {
+				skb_reserve(nskb, 2);
+				skb_put(nskb, skb->len);
+				memcpy(nskb->data, skb->data, skb->len);
+				nskb->dev = skb->dev;
+				nskb->ip_summed = skb->ip_summed;
 			}
-			else if (net_ratelimit())
-				printk(KERN_INFO "Received packet too big for "
-				       "MTU (%d > %d)\n",
-				       skb->len - ETH_HLEN, dev->mtu);
 
 			/* Reinitialise and then destroy the old skbuff. */
 			skb->len = 0;
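The new buffer size replaces the fixed dev->mtu + RX_HEADROOM with the largest aligned payload that still fits in one page alongside the skb metadata. A worked check of that expression under illustrative constants (the real sizeof(struct skb_shared_info) depends on the kernel configuration, so the 88 below is an assumption):

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define SMP_CACHE_BYTES 16u
    /* Round up to the skb alignment unit, as in 2.6-era skbuff.h. */
    #define SKB_DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & \
                               ~(SMP_CACHE_BYTES - 1))

    int main(void)
    {
        unsigned int shinfo = 88;  /* illustrative skb_shared_info size */
        unsigned int len =
            (PAGE_SIZE - 16 - shinfo) & (-SKB_DATA_ALIGN(1));

        /* -SKB_DATA_ALIGN(1) is ...11110000 in binary, so the AND rounds
         * DOWN to a 16-byte boundary: 16 bytes of dev_alloc_skb headroom,
         * the payload, and the shared info all stay inside one page. */
        printf("rx payload = %u bytes\n", len);  /* 3984 with these numbers */
        return 0;
    }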
--- a/tools/python/xen/xend/server/tpmif.py	Tue Dec 06 16:40:43 2005 +0000
+++ b/tools/python/xen/xend/server/tpmif.py	Tue Dec 06 16:40:50 2005 +0000
@@ -50,10 +50,9 @@ class TPMifController(DevController):
 
         result = DevController.configuration(self, devid)
 
-        (instance,) = self.readBackend(devid, 'instance')
+        instance = self.readBackend(devid, 'instance')
 
         if instance:
             result.append(['instance', instance])
-            log.info("configuration: instance=%d." % instance)
 
         return result
--- a/xen/arch/x86/setup.c	Tue Dec 06 16:40:43 2005 +0000
+++ b/xen/arch/x86/setup.c	Tue Dec 06 16:40:50 2005 +0000
@@ -571,7 +571,7 @@ void arch_get_xen_caps(xen_capabilities_
     p += sprintf(p, "xen-%d.%d-x86_64 ", XEN_VERSION, XEN_SUBVERSION);
     if ( hvm_enabled )
     {
-        //p += sprintf(p, "hvm-%d.%d-x86_32 ", XEN_VERSION, XEN_SUBVERSION);
+        p += sprintf(p, "hvm-%d.%d-x86_32 ", XEN_VERSION, XEN_SUBVERSION);
         //p += sprintf(p, "hvm-%d.%d-x86_32p ", XEN_VERSION, XEN_SUBVERSION);
         p += sprintf(p, "hvm-%d.%d-x86_64 ", XEN_VERSION, XEN_SUBVERSION);
     }
@@ -581,7 +581,7 @@ void arch_get_xen_caps(xen_capabilities_
     p++;
 
 #endif
-    
+
     *(p-1) = 0;
 
     BUG_ON((p - info) > sizeof(xen_capabilities_info_t));
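The uncommented line advertises 32-bit HVM guest support alongside 64-bit. A toy reproduction of the string assembly, showing why the closing *(p-1) = 0 suffices to terminate the buffer (the version numbers are illustrative):

    #include <stdio.h>

    #define XEN_VERSION    3
    #define XEN_SUBVERSION 0

    int main(void)
    {
        char info[128], *p = info;

        /* Mirrors the x86_64 branch with hvm_enabled after this patch. */
        p += sprintf(p, "xen-%d.%d-x86_64 ", XEN_VERSION, XEN_SUBVERSION);
        p += sprintf(p, "hvm-%d.%d-x86_32 ", XEN_VERSION, XEN_SUBVERSION);
        p += sprintf(p, "hvm-%d.%d-x86_64 ", XEN_VERSION, XEN_SUBVERSION);
        *(p - 1) = 0;  /* the trailing space becomes the NUL terminator */

        puts(info);    /* xen-3.0-x86_64 hvm-3.0-x86_32 hvm-3.0-x86_64 */
        return 0;
    }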
--- a/xen/arch/x86/shadow32.c	Tue Dec 06 16:40:43 2005 +0000
+++ b/xen/arch/x86/shadow32.c	Tue Dec 06 16:40:50 2005 +0000
@@ -2203,7 +2203,7 @@ int shadow_remove_all_write_access(
     }
 
     if ( shadow_mode_external(d) ) {
-        if (write_refs-- == 0)
+        if (--write_refs == 0)
             return 0;
 
         // Use the back pointer to locate the shadow page that can contain
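The one-character fix changes when the zero test fires: post-decrement compares the old value, pre-decrement the new one, so the old code took the early return only after write_refs had already reached zero (and then underflowed it). A small demonstration:

    #include <stdio.h>

    int main(void)
    {
        int write_refs;

        write_refs = 1;
        printf("%d\n", write_refs-- == 0);  /* 0: compares 1, then decrements */

        write_refs = 1;
        printf("%d\n", --write_refs == 0);  /* 1: decrements to 0, then compares */
        return 0;
    }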
--- a/xen/arch/x86/x86_32/domain_page.c	Tue Dec 06 16:40:43 2005 +0000
+++ b/xen/arch/x86/x86_32/domain_page.c	Tue Dec 06 16:40:50 2005 +0000
@@ -40,10 +40,10 @@ static void flush_all_ready_maps(void)
             cache[i] = l1e_empty();
 }
 
-void *map_domain_page(unsigned long pfn)
+void *map_domain_pages(unsigned long pfn, unsigned int order)
 {
     unsigned long va;
-    unsigned int idx, cpu = smp_processor_id();
+    unsigned int idx, i, flags, cpu = smp_processor_id();
     l1_pgentry_t *cache = mapcache;
 #ifndef NDEBUG
     unsigned int flush_count = 0;
@@ -72,10 +72,15 @@ void *map_domain_page(unsigned long pfn)
             local_flush_tlb();
             shadow_epoch[cpu] = ++epoch;
         }
+
+        flags = 0;
+        for ( i = 0; i < (1U << order); i++ )
+            flags |= l1e_get_flags(cache[idx+i]);
     }
-    while ( l1e_get_flags(cache[idx]) & _PAGE_PRESENT );
+    while ( flags & _PAGE_PRESENT );
 
-    cache[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
+    for ( i = 0; i < (1U << order); i++ )
+        cache[idx+i] = l1e_from_pfn(pfn+i, __PAGE_HYPERVISOR);
 
     spin_unlock(&map_lock);
 
@@ -83,11 +88,12 @@ void *map_domain_page(unsigned long pfn)
     return (void *)va;
 }
 
-void unmap_domain_page(void *va)
+void unmap_domain_pages(void *va, unsigned int order)
 {
-    unsigned int idx;
+    unsigned int idx, i;
     ASSERT((void *)MAPCACHE_VIRT_START <= va);
     ASSERT(va < (void *)MAPCACHE_VIRT_END);
     idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
-    l1e_add_flags(mapcache[idx], READY_FOR_TLB_FLUSH);
+    for ( i = 0; i < (1U << order); i++ )
+        l1e_add_flags(mapcache[idx+i], READY_FOR_TLB_FLUSH);
 }
--- a/xen/include/xen/domain_page.h	Tue Dec 06 16:40:43 2005 +0000
+++ b/xen/include/xen/domain_page.h	Tue Dec 06 16:40:50 2005 +0000
@@ -10,19 +10,22 @@
 #include <xen/config.h>
 #include <xen/mm.h>
 
+#define map_domain_page(pfn)   map_domain_pages(pfn,0)
+#define unmap_domain_page(va)  unmap_domain_pages(va,0)
+
 #ifdef CONFIG_DOMAIN_PAGE
 
 /*
- * Maps a given page frame, returning the mmap'ed virtual address. The page is
- * now accessible until a corresponding call to unmap_domain_page().
+ * Maps a given range of page frames, returning the mapped virtual address. The
+ * pages are now accessible until a corresponding call to unmap_domain_page().
 */
-extern void *map_domain_page(unsigned long pfn);
+extern void *map_domain_pages(unsigned long pfn, unsigned int order);
 
 /*
- * Pass a VA within a page previously mapped with map_domain_page().
- * That page will then be removed from the mapping lists.
+ * Pass a VA within the first page of a range previously mapped with
+ * map_omain_pages(). Those pages will then be removed from the mapping lists.
 */
-extern void unmap_domain_page(void *va);
+extern void unmap_domain_pages(void *va, unsigned int order);
 
 #define DMCACHE_ENTRY_VALID 1U
 #define DMCACHE_ENTRY_HELD  2U
@@ -84,8 +87,8 @@ domain_mmap_cache_destroy(struct domain_
 
 #else /* !CONFIG_DOMAIN_PAGE */
 
-#define map_domain_page(pfn)  phys_to_virt((pfn)<<PAGE_SHIFT)
-#define unmap_domain_page(va) ((void)(va))
+#define map_domain_pages(pfn,order)  phys_to_virt((pfn)<<PAGE_SHIFT)
+#define unmap_domain_pages(va,order) ((void)((void)(va),(void)(order)))
 
 struct domain_mmap_cache {
 };
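Together, the last two files generalise the mapcache to power-of-two ranges while keeping the old single-page calls as order-0 wrappers; the flags-accumulation loop in map_domain_pages() retries until all 2^order consecutive slots are free. A hypothetical usage sketch (fragment; pfn, PAGE_SIZE, and the surrounding hypervisor context are assumed):

    /* Map 1U << 2 == 4 contiguous frames, use them, then unmap.
     * The VA given to unmap_domain_pages() must lie in the FIRST page. */
    unsigned int order = 2;
    void *va = map_domain_pages(pfn, order);

    memset(va, 0, PAGE_SIZE << order);  /* the whole range is mapped */
    unmap_domain_pages(va, order);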