direct-io.hg
changeset 2541:a8fef40fad11
bitkeeper revision 1.1159.1.172 (41542610ZBPAMBCg2f-D1VTZ8pdChw)
Merge xenbk@gandalf:/var/bk/xeno-unstable.bk
into wray-m-3.hpl.hp.com:/home/mjw/repos-bk/xeno-unstable.bk
line diff
--- a/BitKeeper/etc/logging_ok	Fri Sep 24 13:42:42 2004 +0000
+++ b/BitKeeper/etc/logging_ok	Fri Sep 24 13:43:30 2004 +0000
@@ -13,6 +13,7 @@ cl349@freefall.cl.cam.ac.uk
 cl349@labyrinth.cl.cam.ac.uk
 djm@kirby.fc.hp.com
 gm281@boulderdash.cl.cam.ac.uk
+gm281@tetrapod.cl.cam.ac.uk
 iap10@freefall.cl.cam.ac.uk
 iap10@labyrinth.cl.cam.ac.uk
 iap10@nidd.cl.cam.ac.uk
--- a/linux-2.4.27-xen-sparse/arch/xen/config.in	Fri Sep 24 13:42:42 2004 +0000
+++ b/linux-2.4.27-xen-sparse/arch/xen/config.in	Fri Sep 24 13:43:30 2004 +0000
@@ -17,6 +17,8 @@ comment 'Xen'
 bool 'Support for privileged operations (domain 0)' CONFIG_XEN_PRIVILEGED_GUEST
 bool 'Device-driver domain (physical device access)' CONFIG_XEN_PHYSDEV_ACCESS
 bool 'Scrub memory before freeing it to Xen' CONFIG_XEN_SCRUB_PAGES
+bool 'Network-device frontend driver' CONFIG_XEN_NETDEV_FRONTEND
+bool 'Block-device frontend driver' CONFIG_XEN_BLKDEV_FRONTEND
 endmenu
 # The IBM S/390 patch needs this.
 define_bool CONFIG_NO_IDLE_HZ y
--- a/linux-2.4.27-xen-sparse/arch/xen/defconfig-xen0	Fri Sep 24 13:42:42 2004 +0000
+++ b/linux-2.4.27-xen-sparse/arch/xen/defconfig-xen0	Fri Sep 24 13:43:30 2004 +0000
@@ -13,6 +13,8 @@ CONFIG_UID16=y
 CONFIG_XEN_PRIVILEGED_GUEST=y
 CONFIG_XEN_PHYSDEV_ACCESS=y
 CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_NO_IDLE_HZ=y
 CONFIG_FOREIGN_PAGES=y
--- a/linux-2.4.27-xen-sparse/arch/xen/defconfig-xenU	Fri Sep 24 13:42:42 2004 +0000
+++ b/linux-2.4.27-xen-sparse/arch/xen/defconfig-xenU	Fri Sep 24 13:43:30 2004 +0000
@@ -1,5 +1,5 @@
 #
-# Automatically generated make config: don't edit
+# Automatically generated by make menuconfig: don't edit
 #
 CONFIG_XEN=y
 CONFIG_X86=y
@@ -17,6 +17,8 @@ CONFIG_UID16=y
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
 CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_NETDEV_FRONTEND=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_NO_IDLE_HZ=y
 # CONFIG_FOREIGN_PAGES is not set
 CONFIG_NETDEVICES=y
@@ -156,10 +158,6 @@ CONFIG_IP_NF_TARGET_ULOG=y
 # CONFIG_IP_SCTP is not set
 # CONFIG_ATM is not set
 # CONFIG_VLAN_8021Q is not set
-
-#
-#
-#
 # CONFIG_IPX is not set
 # CONFIG_ATALK is not set
 
@@ -192,20 +190,12 @@ CONFIG_IP_NF_TARGET_ULOG=y
 # SCSI support
 #
 CONFIG_SCSI=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
 CONFIG_BLK_DEV_SD=y
 CONFIG_SD_EXTRA_DEVS=40
 # CONFIG_CHR_DEV_ST is not set
 # CONFIG_CHR_DEV_OSST is not set
 # CONFIG_BLK_DEV_SR is not set
 CONFIG_CHR_DEV_SG=y
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
 # CONFIG_SCSI_DEBUG_QUEUES is not set
 # CONFIG_SCSI_MULTI_LUN is not set
 # CONFIG_SCSI_CONSTANTS is not set
@@ -317,10 +307,6 @@ CONFIG_PSMOUSE=y
 # CONFIG_INPUT_EMU10K1 is not set
 # CONFIG_INPUT_SERIO is not set
 # CONFIG_INPUT_SERPORT is not set
-
-#
-# Joysticks
-#
 # CONFIG_INPUT_ANALOG is not set
 # CONFIG_INPUT_A3D is not set
 # CONFIG_INPUT_ADI is not set
--- a/linux-2.4.27-xen-sparse/arch/xen/drivers/blkif/Makefile	Fri Sep 24 13:42:42 2004 +0000
+++ b/linux-2.4.27-xen-sparse/arch/xen/drivers/blkif/Makefile	Fri Sep 24 13:43:30 2004 +0000
@@ -1,8 +1,8 @@
 
 O_TARGET := drv.o
 
-subdir-y += frontend
-obj-y    += frontend/drv.o
+subdir-$(CONFIG_XEN_NETDEV_FRONTEND) += frontend
+obj-$(CONFIG_XEN_NETDEV_FRONTEND)    += frontend/drv.o
 
 subdir-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend
 obj-$(CONFIG_XEN_PHYSDEV_ACCESS)    += backend/drv.o
--- a/linux-2.4.27-xen-sparse/arch/xen/drivers/netif/Makefile	Fri Sep 24 13:42:42 2004 +0000
+++ b/linux-2.4.27-xen-sparse/arch/xen/drivers/netif/Makefile	Fri Sep 24 13:43:30 2004 +0000
@@ -1,8 +1,8 @@
 
 O_TARGET := drv.o
 
-subdir-y += frontend
-obj-y    += frontend/drv.o
+subdir-$(CONFIG_XEN_NETDEV_FRONTEND) += frontend
+obj-$(CONFIG_XEN_NETDEV_FRONTEND)    += frontend/drv.o
 
 subdir-$(CONFIG_XEN_PHYSDEV_ACCESS) += backend
 obj-$(CONFIG_XEN_PHYSDEV_ACCESS)    += backend/drv.o
--- a/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 24 13:42:42 2004 +0000
+++ b/linux-2.6.8.1-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 24 13:43:30 2004 +0000
@@ -13,6 +13,7 @@
 #include "common.h"
 
 static void netif_page_release(struct page *page);
+static void netif_skb_release(struct sk_buff *skb);
 static void make_tx_response(netif_t *netif, 
                              u16      id,  
                              s8       st);
@@ -40,7 +41,8 @@ static unsigned char rx_notify[NR_EVENT_
 static unsigned long mmap_vstart;
 #define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
 
-#define PKT_PROT_LEN (ETH_HLEN + 20)
+#define PKT_MIN_LEN (ETH_HLEN + 20)
+#define PKT_PROT_LEN 64
 
 static struct {
     netif_tx_request_t req;
@@ -385,6 +387,7 @@ static void net_tx_action(unsigned long
     NETIF_RING_IDX i;
     multicall_entry_t *mcl;
     PEND_RING_IDX dc, dp;
+    unsigned int data_len;
 
     if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
         goto skip_dealloc;
@@ -497,7 +500,7 @@ static void net_tx_action(unsigned long
 
         netif_schedule_work(netif);
 
-        if ( unlikely(txreq.size <= PKT_PROT_LEN) || 
+        if ( unlikely(txreq.size <= PKT_MIN_LEN) || 
              unlikely(txreq.size > ETH_FRAME_LEN) )
         {
             DPRINTK("Bad packet size: %d\n", txreq.size);
@@ -519,7 +522,9 @@ static void net_tx_action(unsigned long
 
         pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
 
-        if ( unlikely((skb = alloc_skb(PKT_PROT_LEN+16, GFP_ATOMIC)) == NULL) )
+        data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
+
+        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
         {
             DPRINTK("Can't allocate a skb in start_xmit.\n");
             make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
@@ -578,19 +583,28 @@ static void net_tx_action(unsigned long
         phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
             FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
 
-        __skb_put(skb, PKT_PROT_LEN);
+        data_len = txreq.size > PKT_PROT_LEN ? PKT_PROT_LEN : txreq.size;
+
+        __skb_put(skb, data_len);
         memcpy(skb->data, 
                (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
-               PKT_PROT_LEN);
+               data_len);
 
-        /* Append the packet payload as a fragment. */
-        skb_shinfo(skb)->frags[0].page        = 
-            virt_to_page(MMAP_VADDR(pending_idx));
-        skb_shinfo(skb)->frags[0].size        = txreq.size - PKT_PROT_LEN;
-        skb_shinfo(skb)->frags[0].page_offset = 
-            (txreq.addr + PKT_PROT_LEN) & ~PAGE_MASK;
-        skb_shinfo(skb)->nr_frags = 1;
-        skb->data_len = txreq.size - PKT_PROT_LEN;
+        if (data_len < txreq.size) {
+            /* Append the packet payload as a fragment. */
+            skb_shinfo(skb)->frags[0].page        = 
+                virt_to_page(MMAP_VADDR(pending_idx));
+            skb_shinfo(skb)->frags[0].size        = txreq.size - data_len;
+            skb_shinfo(skb)->frags[0].page_offset = 
+                (txreq.addr + data_len) & ~PAGE_MASK;
+            skb_shinfo(skb)->nr_frags = 1;
+        } else {
+            skb_shinfo(skb)->frags[0].page        = 
+                virt_to_page(MMAP_VADDR(pending_idx));
+            skb->destructor = netif_skb_release;
+        }
+
+        skb->data_len = txreq.size - data_len;
         skb->len += skb->data_len;
 
         skb->dev = netif->dev;
@@ -606,13 +620,9 @@ static void net_tx_action(unsigned long
     }
 }
 
-static void netif_page_release(struct page *page)
+static void netif_idx_release(u16 pending_idx)
 {
     unsigned long flags;
-    u16 pending_idx = page - virt_to_page(mmap_vstart);
-
-    /* Ready for next use. */
-    set_page_count(page, 1);
 
     spin_lock_irqsave(&dealloc_lock, flags);
     dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
@@ -621,6 +631,24 @@ static void netif_page_release(struct pa
     tasklet_schedule(&net_tx_tasklet);
 }
 
+static void netif_page_release(struct page *page)
+{
+    u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+    /* Ready for next use. */
+    set_page_count(page, 1);
+
+    netif_idx_release(pending_idx);
+}
+
+static void netif_skb_release(struct sk_buff *skb)
+{
+    struct page *page = skb_shinfo(skb)->frags[0].page;
+    u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+    netif_idx_release(pending_idx);
+}
+
 #if 0
 long flush_bufs_for_netif(netif_t *netif)
 {
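The netback.c hunks above change the transmit path so that only the protocol headers are guaranteed to land in the skb's linear area: up to PKT_PROT_LEN (now 64) bytes are copied, and only if the packet is larger does the remainder get mapped as frags[0]; a packet that fits entirely in the copy instead gets a destructor (netif_skb_release) so the granted page is still returned through the dealloc ring. A minimal userspace sketch of that split rule, using the constants from the diff; split_tx_request is a hypothetical name:

    #include <stdio.h>

    #define ETH_HLEN     14
    #define PKT_MIN_LEN  (ETH_HLEN + 20)  /* smallest size net_tx_action accepts */
    #define PKT_PROT_LEN 64               /* bytes copied into the linear area   */

    static void split_tx_request(unsigned int size)
    {
        unsigned int data_len;

        /* Mirrors the txreq.size sanity check in the diff. */
        if (size <= PKT_MIN_LEN) {
            printf("size %u: rejected as a bad packet size\n", size);
            return;
        }

        /* Copy at most PKT_PROT_LEN bytes so the headers are linear. */
        data_len = size > PKT_PROT_LEN ? PKT_PROT_LEN : size;

        if (data_len < size)
            printf("size %u: copy %u bytes, map %u bytes as frags[0]\n",
                   size, data_len, size - data_len);
        else
            printf("size %u: copy everything; skb destructor frees the page\n",
                   size);
    }

    int main(void)
    {
        split_tx_request(40);    /* small packet: copied whole, no fragment */
        split_tx_request(1514);  /* full frame: 64-byte copy plus fragment  */
        return 0;
    }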
--- a/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 24 13:42:42 2004 +0000
+++ b/linux-2.6.8.1-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 24 13:43:30 2004 +0000
@@ -249,16 +249,14 @@ static int vif_wake(struct net_device *d
 {
     struct sk_buff *skb;
     u32 src_ip, dst_ip;
-    unsigned char dst_hw[ETH_ALEN];
-
-    memset(dst_hw, 0xff, ETH_ALEN);
 
     dst_ip = INADDR_BROADCAST;
     src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
 
-    skb = arp_create(ARPOP_REQUEST, ETH_P_ARP,
+    skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
                      dst_ip, dev, src_ip,
-                     dst_hw, dev->dev_addr, NULL);
+                     /*dst_hw*/ NULL, /*src_hw*/ NULL, 
+                     /*target_hw*/ dev->dev_addr);
     if ( skb == NULL )
         return -ENOMEM;
 
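The netfront.c hunk swaps the packet sent when an interface wakes (e.g. after migration) from a broadcast ARP request to an unsolicited ARP reply advertising the device's own address. My reading of the 2.6-era arp_create() (an assumption about net/ipv4/arp.c, not something stated in the diff) is that a NULL dst_hw falls back to the device broadcast address and a NULL src_hw to dev->dev_addr, so the reply is still broadcast on the wire. A userspace sketch of the resulting ARP body; the struct layout and the addresses are purely illustrative:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Simplified view of the ARP payload fields (illustrative only). */
    struct arp_fields {
        uint16_t op;      /* 2 = ARPOP_REPLY                               */
        uint8_t  sha[6];  /* sender MAC: src_hw defaulted to dev->dev_addr */
        uint32_t spa;     /* sender IP: the inet_select_addr() result      */
        uint8_t  tha[6];  /* target MAC: the explicit dev->dev_addr arg    */
        uint32_t tpa;     /* target IP: INADDR_BROADCAST                   */
    };

    int main(void)
    {
        uint8_t mac[6] = { 0x00, 0x16, 0x3e, 0xaa, 0xbb, 0xcc }; /* example */
        struct arp_fields f = { .op = 2, .spa = 0x0a000001,  /* 10.0.0.1    */
                                .tpa = 0xffffffff };         /* broadcast   */

        memcpy(f.sha, mac, sizeof mac);
        memcpy(f.tha, mac, sizeof mac);

        /* Switches and neighbours that see this broadcast relearn which
           MAC currently owns the IP -- the point of sending it on wake. */
        printf("op=%u spa=%08x tpa=%08x\n",
               (unsigned)f.op, (unsigned)f.spa, (unsigned)f.tpa);
        return 0;
    }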
--- a/xen/arch/x86/domain.c	Fri Sep 24 13:42:42 2004 +0000
+++ b/xen/arch/x86/domain.c	Fri Sep 24 13:43:30 2004 +0000
@@ -223,12 +223,12 @@ void arch_do_createdomain(struct domain 
         virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
     SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
     machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 
-                            PAGE_SHIFT] = 0x80000000UL;  /* debug */
+                            PAGE_SHIFT] = INVALID_P2M_ENTRY;
 
     d->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
     memset(d->mm.perdomain_pt, 0, PAGE_SIZE);
     machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >> 
-                            PAGE_SHIFT] = 0x0fffdeadUL;  /* debug */
+                            PAGE_SHIFT] = INVALID_P2M_ENTRY;
 }
 
 int arch_final_setup_guestos(struct domain *d, full_execution_context_t *c)
--- a/xen/arch/x86/memory.c	Fri Sep 24 13:42:42 2004 +0000
+++ b/xen/arch/x86/memory.c	Fri Sep 24 13:43:30 2004 +0000
@@ -1802,6 +1802,12 @@ static __init int ptwr_init(void)
             (void *)alloc_xenheap_page();
         ptwr_info[i].ptinfo[PTWR_PT_INACTIVE].page =
             (void *)alloc_xenheap_page();
+        machine_to_phys_mapping[virt_to_phys(
+            ptwr_info[i].ptinfo[PTWR_PT_ACTIVE].page)>>PAGE_SHIFT] =
+            INVALID_P2M_ENTRY;
+        machine_to_phys_mapping[virt_to_phys(
+            ptwr_info[i].ptinfo[PTWR_PT_INACTIVE].page)>>PAGE_SHIFT] =
+            INVALID_P2M_ENTRY;
     }
 
     return 0;
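These two hunks (together with the asm-x86/mm.h hunk at the end of this changeset) replace per-site debug constants such as 0x80000000UL with the single INVALID_P2M_ENTRY sentinel: machine frames that belong to Xen itself have no guest pseudo-physical frame, so their machine-to-phys entries are set to a value that lookup code can test for. A toy sketch of the idea, not Xen code:

    #include <stdio.h>

    #define INVALID_P2M_ENTRY (~0UL)

    /* Toy machine-to-phys table: index = machine frame number. */
    static unsigned long machine_to_phys[8];

    int main(void)
    {
        machine_to_phys[3] = INVALID_P2M_ENTRY; /* Xen-private frame     */
        machine_to_phys[4] = 0x1234;            /* maps to a guest frame */

        for (int mfn = 3; mfn <= 4; mfn++)
            printf("mfn %d -> %s\n", mfn,
                   machine_to_phys[mfn] == INVALID_P2M_ENTRY
                       ? "no guest mapping" : "guest pseudo-phys frame");
        return 0;
    }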
--- a/xen/arch/x86/shadow.c	Fri Sep 24 13:42:42 2004 +0000
+++ b/xen/arch/x86/shadow.c	Fri Sep 24 13:43:30 2004 +0000
@@ -200,8 +200,6 @@ int shadow_mode_enable( struct domain *p
     struct shadow_status **fptr;
     int i;
 
-    m->shadow_mode = mode;
-
     // allocate hashtable
     m->shadow_ht = xmalloc(shadow_ht_buckets * 
                            sizeof(struct shadow_status));
@@ -241,16 +239,25 @@ int shadow_mode_enable( struct domain *p
         if( m->shadow_dirty_bitmap == NULL )
         {
             m->shadow_dirty_bitmap_size = 0;
+            BUG();
             goto nomem;
         }
         memset(m->shadow_dirty_bitmap,0,m->shadow_dirty_bitmap_size/8);
     }
 
+    m->shadow_mode = mode;
+
     // call shadow_mk_pagetable
     __shadow_mk_pagetable( m );
     return 0;
 
 nomem:
+    if( m->shadow_ht ) {
+        xfree( m->shadow_ht ); m->shadow_ht = NULL; };
+
+    if( m->shadow_ht_extras ) {
+        xfree( m->shadow_ht_extras ); m->shadow_ht_extras = NULL; };
+
     return -ENOMEM;
 }
 
@@ -285,7 +292,10 @@ void __shadow_mode_disable(struct domain
     }
 
     // free the hashtable itself
-    xfree( &m->shadow_ht[0] );
+    xfree( m->shadow_ht );
+
+    m->shadow_ht = NULL;
+    m->shadow_ht_extras = NULL;
 }
 
 static int shadow_mode_table_op(struct domain *d, 
@@ -314,8 +324,6 @@ static int shadow_mode_table_op(struct d
     switch(op)
     {
     case DOM0_SHADOW_CONTROL_OP_FLUSH:
-        // XXX THIS IS VERY DANGEROUS : MUST ENSURE THE PTs ARE NOT IN USE ON
-        // OTHER CPU -- fix when we get sched sync pause.
         __free_shadow_table( m );
         break;
 
@@ -452,12 +460,12 @@ int shadow_mode_control(struct domain *d
     else if ( cmd == DOM0_SHADOW_CONTROL_OP_ENABLE_TEST )
     {
         shadow_mode_disable(d);
-        shadow_mode_enable(d, SHM_test);
+        rc = shadow_mode_enable(d, SHM_test);
    }
     else if ( cmd == DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY )
     {
         shadow_mode_disable(d);
-        shadow_mode_enable(d, SHM_logdirty);
+        rc = shadow_mode_enable(d, SHM_logdirty);
     }
     else if ( shadow_mode(d) && 
              (cmd >= DOM0_SHADOW_CONTROL_OP_FLUSH) &&
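The shadow.c hunks reorder shadow_mode_enable() into an allocate-then-commit shape: m->shadow_mode is set only after every allocation has succeeded, the nomem path frees and NULLs what was built so a later disable cannot double-free, and the control-op callers now capture the return code. A minimal sketch of the same pattern with hypothetical names (mode_enable stands in for shadow_mode_enable):

    #include <stdlib.h>

    struct mode_state {
        int   mode;
        void *table;    /* stands in for shadow_ht        */
        void *extras;   /* stands in for shadow_ht_extras */
    };

    static int mode_enable(struct mode_state *m, int mode)
    {
        m->table = m->extras = NULL;   /* known state for the error path */

        if ((m->table = malloc(64)) == NULL)
            goto nomem;
        if ((m->extras = malloc(64)) == NULL)
            goto nomem;

        m->mode = mode;                /* commit only once everything exists */
        return 0;

    nomem:
        free(m->table);  m->table  = NULL;
        free(m->extras); m->extras = NULL;
        return -1;                     /* callers must check this, as the   */
                                       /* DOM0_SHADOW_CONTROL paths now do  */
    }

    int main(void)
    {
        struct mode_state s = { 0 };
        return mode_enable(&s, 1) == 0 ? 0 : 1;
    }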
--- a/xen/common/sched_atropos.c	Fri Sep 24 13:42:42 2004 +0000
+++ b/xen/common/sched_atropos.c	Fri Sep 24 13:43:30 2004 +0000
@@ -23,26 +23,17 @@
 #include <hypervisor-ifs/sched_ctl.h>
 #include <xen/trace.h>
 
-/*
- * KAF -- Atropos is broken by the new scheduler interfaces.
- * It'll need fixing to get rid of use of ATROPOS_TASK__*
- */
-#ifdef KAF_KILLED
-
 #define ATROPOS_TASK_UNBLOCKED 16
 #define ATROPOS_TASK_WAIT      32
-
-#define Activation_Reason_Allocated 1
-#define Activation_Reason_Preempted 2
-#define Activation_Reason_Extra     3
+#define ATROPOS_TASK_BLOCKED   48
 
 /* Atropos-specific per-domain data */
 struct at_dom_info
 {
     /* MAW Xen additions */
     struct domain *owner;      /* the domain this data belongs to */
+    struct list_head run_list; /* runqueue                        */
     struct list_head waitq;    /* wait queue                      */
-    int reason;                /* reason domain was last scheduled */
 
     /* (what remains of) the original fields */
 
@@ -57,26 +48,59 @@ struct at_dom_info
     s_time_t latency;          /* Unblocking latency */
 
     int xtratime;              /* Prepared to accept extra time? */
+    int state;                 /* Keeps Atropos domain state     */
 };
 
 /* Atropos-specific per-CPU data */
 struct at_cpu_info
 {
+    spinlock_t runq_lock;
+    struct list_head runq;     /* run queue */
+    spinlock_t waitq_lock;
     struct list_head waitq;    /* wait queue*/
 };
 
 
 #define DOM_INFO(_p)  ((struct at_dom_info *)((_p)->sched_priv))
-#define CPU_INFO(_c)  ((struct at_cpu_info *)((_c).sched_priv))
-#define WAITQ(cpu)    (&CPU_INFO(schedule_data[cpu])->waitq)
-#define RUNQ(cpu)     (&schedule_data[cpu].runqueue)
+#define CPU_INFO(_c)  ((struct at_cpu_info *)((schedule_data[_c]).sched_priv))
+#define WAITQ(cpu)    (&CPU_INFO(cpu)->waitq)
+#define RUNQ(cpu)     (&CPU_INFO(cpu)->runq)
+#define RUNLIST(_d)   (&DOM_INFO(_d)->run_list)
 
 #define BESTEFFORT_QUANTUM MILLISECS(5)
 
+static void at_dump_cpu_state(int cpu);
+
 
 /* SLAB cache for struct at_dom_info objects */
 static xmem_cache_t *dom_info_cache;
 
+/*
+ * Wrappers for run-queue management. Must be called with the run_lock
+ * held.
+ */
+static inline void __add_to_runqueue_head(struct domain *d)
+{
+    list_add(RUNLIST(d), RUNQ(d->processor));
+}
+
+static inline void __add_to_runqueue_tail(struct domain *d)
+{
+    list_add_tail(RUNLIST(d), RUNQ(d->processor));
+}
+
+static inline void __del_from_runqueue(struct domain *d)
+{
+    struct list_head *runlist = RUNLIST(d);
+    list_del(runlist);
+    runlist->next = NULL;
+}
+
+static inline int __task_on_runqueue(struct domain *d)
+{
+    return (RUNLIST(d))->next != NULL;
+}
+
 
 /** calculate the length of a linked list */
 static int q_len(struct list_head *q)
@@ -114,15 +138,17 @@ static inline struct domain *waitq_el(st
 static void requeue(struct domain *sdom)
 {
     struct at_dom_info *inf = DOM_INFO(sdom);
-    struct list_head *prev = WAITQ(sdom->processor);
+    struct list_head *prev;
     struct list_head *next;
 
-    if(sdom->state == ATROPOS_TASK_WAIT ||
-       sdom->state == ATROPOS_TASK_UNBLOCKED )
+
+    if(!domain_runnable(sdom)) return;
+    
+    if(inf->state == ATROPOS_TASK_WAIT ||
+       inf->state == ATROPOS_TASK_UNBLOCKED)
     {
-        /* insert into ordered wait queue */
+        prev = WAITQ(sdom->processor);
 
-        prev = WAITQ(sdom->processor);
         list_for_each(next, WAITQ(sdom->processor))
         {
             struct at_dom_info *i = 
@@ -144,16 +170,17 @@ static void requeue(struct domain *sdom)
     else if ( domain_runnable(sdom) )
     {
         /* insert into ordered run queue */
+        
         prev = RUNQ(sdom->processor);
 
         list_for_each(next, RUNQ(sdom->processor))
         {
-            struct domain *p = list_entry(next, struct domain,
+            struct at_dom_info *p = list_entry(next, struct at_dom_info,
                                           run_list);
 
-            if( DOM_INFO(p)->deadline > inf->deadline || is_idle_task(p) )
+            if( p->deadline > inf->deadline || is_idle_task(p->owner) )
             {
-                __list_add(&sdom->run_list, prev, next);
+                __list_add(&inf->run_list, prev, next);
                 break;
             }
 
@@ -161,12 +188,27 @@ static void requeue(struct domain *sdom)
         }
 
         if ( next == RUNQ(sdom->processor) )
-            list_add_tail(&sdom->run_list, RUNQ(sdom->processor));
+            list_add_tail(&inf->run_list, RUNQ(sdom->processor));
+
+        
     }
     /* silently ignore tasks in other states like BLOCKED, DYING, STOPPED, etc
      * - they shouldn't be on any queue */
 }
 
+/** at_alloc_task - allocate private info for a task */
+static int at_alloc_task(struct domain *p)
+{
+    ASSERT(p != NULL);
+    
+    p->sched_priv = xmem_cache_alloc(dom_info_cache);
+    if( p->sched_priv == NULL )
+        return -1;
+    
+    return 0;
+}
+
+
 /* prepare a task to be added to scheduling */
 static void at_add_task(struct domain *p)
 {
@@ -199,14 +241,15 @@ static void at_add_task(struct domain *p
         DOM_INFO(p)->slice = MILLISECS(10);
         DOM_INFO(p)->latency = SECONDS(10);
         DOM_INFO(p)->xtratime = 1;
-        DOM_INFO(p)->deadline = now + SECONDS(10);
+        DOM_INFO(p)->deadline = now;
+//        DOM_INFO(p)->deadline = now + SECONDS(10);
         DOM_INFO(p)->prevddln = 0;
     }
 
+    INIT_LIST_HEAD(&(DOM_INFO(p)->run_list));
     INIT_LIST_HEAD(&(DOM_INFO(p)->waitq));
 }
 
-
 /**
  * dequeue - remove a domain from any queues it is on.
  * @sdom: the task to remove
@@ -214,19 +257,16 @@ static void at_add_task(struct domain *p
 static void dequeue(struct domain *sdom)
 {
     struct at_dom_info *inf = DOM_INFO(sdom);
-
+    
     ASSERT(sdom->domain != IDLE_DOMAIN_ID);
 
     /* just delete it from all the queues! */
     list_del(&inf->waitq);
     INIT_LIST_HEAD(&inf->waitq);
+    
 
     if(__task_on_runqueue(sdom))
         __del_from_runqueue(sdom);
-
-    sdom->run_list.next = NULL;
-    sdom->run_list.prev = NULL;
-
 }
 
 
@@ -254,44 +294,64 @@ static void unblock(struct domain *sdom)
 {
     s_time_t time = NOW();
     struct at_dom_info *inf = DOM_INFO(sdom);
-
+    
     dequeue(sdom);
 
     /* We distinguish two cases... short and long blocks */
-
     if ( inf->deadline < time )
     {
         /* Long blocking case */
 
-       /* The sdom has passed its deadline since it was blocked.
-          Give it its new deadline based on the latency value. */
-       inf->prevddln = time;
+        /* The sdom has passed its deadline since it was blocked. 
+           Give it its new deadline based on the latency value. */
+        inf->prevddln = time;
 
         /* Scale the scheduling parameters as requested by the latency hint. */
-       inf->deadline = time + inf->latency;
+        inf->deadline = time + inf->latency;
         inf->slice = inf->nat_slice / ( inf->nat_period / inf->latency );
         inf->period = inf->latency;
-       inf->remain = inf->slice;
+        inf->remain = inf->slice;
     }
-    else
+    else 
     {
         /* Short blocking case */
 
-       /* We leave REMAIN intact, but put this domain on the WAIT
-          queue marked as recently unblocked.  It will be given
-          priority over other domains on the wait queue until while
-          REMAIN>0 in a generous attempt to help it make up for its
-          own foolishness. */
-       if(inf->remain > 0)
-           sdom->state = ATROPOS_TASK_UNBLOCKED;
+        /* We leave REMAIN intact, but put this domain on the WAIT
+           queue marked as recently unblocked.  It will be given
+           priority over other domains on the wait queue until while
+           REMAIN>0 in a generous attempt to help it make up for its
+           own foolishness. */
+        if(inf->remain > 0)
+            inf->state = ATROPOS_TASK_UNBLOCKED;
         else
-            sdom->state = ATROPOS_TASK_WAIT;
+            inf->state = ATROPOS_TASK_WAIT;
    }
 
     requeue(sdom);
+}
 
+
+static int at_init_idle_task(struct domain *p)
+{
+    if(at_alloc_task(p) < 0) return -1;
+
+    at_add_task(p);
+
+    dequeue(p);
+    requeue(p);
+
+    return 0;
 }
 
+
+static void block(struct domain* sdom)
+{
+    DOM_INFO(sdom)->state = ATROPOS_TASK_BLOCKED;
+    dequeue(sdom);
+    requeue(sdom);
+}
+
+
 /**
  * ATROPOS - main scheduler function
  */
@@ -301,13 +361,13 @@ task_slice_t ksched_scheduler(s_time_t t
     s_time_t newtime;
     s_time_t ranfor;             /* How long the domain ran      */
     struct domain *sdom;         /* tmp. scheduling domain       */
-    int reason;                  /* reason for reschedule        */
     int cpu = cur_sdom->processor; /* current CPU                */
     struct at_dom_info *cur_info;
     static unsigned long waitq_rrobin = 0;
     int i;
     task_slice_t ret;
 
+
     cur_info = DOM_INFO(cur_sdom);
 
     ASSERT( cur_sdom != NULL);
@@ -333,36 +393,35 @@ task_slice_t ksched_scheduler(s_time_t t
     dequeue(cur_sdom);
 
     if ( domain_runnable(cur_sdom) || 
-         (cur_sdom->state == ATROPOS_TASK_UNBLOCKED) )
+         (cur_info->state == ATROPOS_TASK_UNBLOCKED) )
     {
 
-       /* In this block, we are doing accounting for an sdom which has
-          been running in contracted time.  Note that this could now happen
-          even if the domain is on the wait queue (i.e. if it blocked) */
+        /* In this block, we are doing accounting for an sdom which has 
+           been running in contracted time.  Note that this could now happen 
+           even if the domain is on the wait queue (i.e. if it blocked) */
 
-       /* Deduct guaranteed time from the domain */
-       cur_info->remain -= ranfor;
+        /* Deduct guaranteed time from the domain */
+        cur_info->remain -= ranfor;
 
-       /* If guaranteed time has run out... */
-       if ( cur_info->remain <= 0 )
+        /* If guaranteed time has run out... */
+        if ( cur_info->remain <= 0 )
         {
-           /* Move domain to correct position in WAIT queue */
+            /* Move domain to correct position in WAIT queue */
             /* XXX sdom_unblocked doesn't need this since it is 
                already in the correct place. */
-           cur_sdom->state = ATROPOS_TASK_WAIT;
-       }
+            cur_info->state = ATROPOS_TASK_WAIT;
+        }
     }
 
     requeue(cur_sdom);
 
-  deschedule_done:
-
+deschedule_done:
     /*****************************
      *
     * We have now successfully descheduled the current sdom.
     * The next task is the allocate CPU time to any sdom it is due to.
     *
-    ****************************/
+    ****************************/ 
    cur_sdom = NULL;
 
     /*****************************
@@ -371,13 +430,14 @@ task_slice_t ksched_scheduler(s_time_t t
     * period deadline.  If necessary, move them to run queue.
     *
     ****************************/
+    
    while(!list_empty(WAITQ(cpu)) && 
-         DOM_INFO(sdom = waitq_el(WAITQ(cpu)->next))->deadline <= time ) {
+          DOM_INFO(sdom = waitq_el(WAITQ(cpu)->next))->deadline <= time )
+    {
 
-       struct at_dom_info *inf = DOM_INFO(sdom);
-
+        struct at_dom_info *inf = DOM_INFO(sdom);
        dequeue(sdom);
-
+        
         if ( inf->period != inf->nat_period )
         {
             /* This domain has had its parameters adjusted as a result of
@@ -392,22 +452,22 @@ task_slice_t ksched_scheduler(s_time_t t
             }
         }
 
-       /* Domain begins a new period and receives a slice of CPU 
-        * If this domain has been blocking then throw away the
-        * rest of it's remain - it can't be trusted */
-       if (inf->remain > 0) 
-           inf->remain = inf->slice;
-       else 
-           inf->remain += inf->slice;
+        /* Domain begins a new period and receives a slice of CPU 
+         * If this domain has been blocking then throw away the
+         * rest of it's remain - it can't be trusted */
+        if (inf->remain > 0) 
+            inf->remain = inf->slice;
+        else 
+            inf->remain += inf->slice;
 
-       inf->prevddln = inf->deadline;
-       inf->deadline += inf->period;
+        inf->prevddln = inf->deadline;
+        inf->deadline += inf->period;
 
         if ( inf->remain <= 0 )
-            sdom->state = ATROPOS_TASK_WAIT;
+            inf->state = ATROPOS_TASK_WAIT;
 
-       /* Place on the appropriate queue */
-       requeue(sdom);
+        /* Place on the appropriate queue */
+        requeue(sdom);
     }
 
     /*****************************
@@ -421,13 +481,11 @@ task_slice_t ksched_scheduler(s_time_t t
     ****************************/
 
     /* we guarantee there's always something on the runqueue */
-    cur_sdom = list_entry(RUNQ(cpu)->next,
-                          struct domain, run_list);
+    cur_info = list_entry(RUNQ(cpu)->next,
+                          struct at_dom_info, run_list);
 
-    cur_info = DOM_INFO(cur_sdom);
+    cur_sdom = cur_info->owner;
     newtime = time + cur_info->remain;
-    reason = (cur_info->prevddln > cur_sdom->lastschd) ?
-        Activation_Reason_Allocated : Activation_Reason_Preempted;
 
     /* MAW - the idle domain is always on the run queue.  We run from the
      * runqueue if it's NOT the idle domain or if there's nothing on the wait
@@ -436,12 +494,13 @@ task_slice_t ksched_scheduler(s_time_t t
     {
         struct list_head *item;
 
-       /* Try running a domain on the WAIT queue - this part of the
-          scheduler isn't particularly efficient but then again, we
-          don't have any guaranteed domains to worry about. */
+        /* Try running a domain on the WAIT queue - this part of the
+           scheduler isn't particularly efficient but then again, we
+           don't have any guaranteed domains to worry about. */
 
-       /* See if there are any unblocked domains on the WAIT
-          queue who we can give preferential treatment to. */
+        /* See if there are any unblocked domains on the WAIT
+           queue who we can give preferential treatment to. */
+        
         list_for_each(item, WAITQ(cpu))
         {
             struct at_dom_info *inf =
@@ -449,23 +508,24 @@ task_slice_t ksched_scheduler(s_time_t t
 
             sdom = inf->owner;
 
-           if (sdom->state == ATROPOS_TASK_UNBLOCKED) { 
-               cur_sdom = sdom;
-               cur_info = inf;
-               newtime = time + inf->remain;
-               reason = Activation_Reason_Preempted;
-               goto found;
+            if (inf->state == ATROPOS_TASK_UNBLOCKED) 
+            {
+                cur_sdom = sdom;
+                cur_info = inf;
+                newtime = time + inf->remain;
+                goto found;
+            }
         }
-       }
 
         /* init values needed to approximate round-robin for slack time */
         i = 0;
         if ( waitq_rrobin >= q_len(WAITQ(cpu)))
             waitq_rrobin = 0;
 
-       /* Last chance: pick a domain on the wait queue with the XTRA
-          flag set.  The NEXT_OPTM field is used to cheaply achieve
-          an approximation of round-robin order */
+        
+        /* Last chance: pick a domain on the wait queue with the XTRA
+           flag set.  The NEXT_OPTM field is used to cheaply achieve
+           an approximation of round-robin order */
         list_for_each(item, WAITQ(cpu))
         {
             struct at_dom_info *inf =
@@ -473,11 +533,11 @@ task_slice_t ksched_scheduler(s_time_t t
 
             sdom = inf->owner;
 
-            if (inf->xtratime && i >= waitq_rrobin) {
+            if (inf->xtratime && i >= waitq_rrobin) 
+            {
                 cur_sdom = sdom;
                 cur_info = inf;
                 newtime = time + BESTEFFORT_QUANTUM;
-                reason = Activation_Reason_Extra;
                 waitq_rrobin = i + 1; /* set this value ready for next */
                 goto found;
             }
@@ -502,7 +562,7 @@ task_slice_t ksched_scheduler(s_time_t t
     /* exhausted its time, cut short the time allocation */
     if (!list_empty(WAITQ(cpu)))
     {
-        newtime = MIN(newtime,
+        newtime = MIN(newtime, 
                       DOM_INFO(waitq_el(WAITQ(cpu)->next))->deadline);
     }
 
@@ -512,9 +572,6 @@ task_slice_t ksched_scheduler(s_time_t t
     ret.task = cur_sdom;
     ret.time = newtime - time;
 
-    cur_sdom->min_slice = newtime - time;
-    DOM_INFO(cur_sdom)->reason = reason;
-
     TRACE_1D(0, cur_sdom->domain);
 
     return ret;
@@ -531,8 +588,10 @@ static int at_init_scheduler()
         schedule_data[i].sched_priv = xmalloc(sizeof(struct at_cpu_info));
         if ( schedule_data[i].sched_priv == NULL )
             return -1;
-        WAITQ(i)->next = WAITQ(i);
-        WAITQ(i)->prev = WAITQ(i);
+        INIT_LIST_HEAD(WAITQ(i));
+        INIT_LIST_HEAD(RUNQ(i));
+        spin_lock_init(&CPU_INFO(i)->runq_lock);
+        spin_lock_init(&CPU_INFO(i)->waitq_lock);
     }
 
     dom_info_cache = xmem_cache_create("Atropos dom info",
@@ -542,13 +601,6 @@ static int at_init_scheduler()
     return 0;
 }
 
-/* dump relevant per-cpu state for a run queue dump */
-static void at_dump_cpu_state(int cpu)
-{
-    printk("Waitq len: %d Runq len: %d ",
-           q_len(WAITQ(cpu)),
-           q_len(RUNQ(cpu)));
-}
 
 /* print relevant per-domain info for a run queue dump */
 static void at_dump_runq_el(struct domain *p)
@@ -558,6 +610,51 @@ static void at_dump_runq_el(struct domai
 }
 
 
+/* dump relevant per-cpu state for a run queue dump */
+static void at_dump_cpu_state(int cpu)
+{
+    struct list_head *list, *queue;
+    int loop = 0;
+    struct at_dom_info *d_inf;
+    struct domain *d;
+
+    queue = RUNQ(cpu);
+    printk("\nRUNQUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
+           (unsigned long) queue->next, (unsigned long) queue->prev);
+
+    list_for_each ( list, queue )
+    {
+        d_inf = list_entry(list, struct at_dom_info, run_list);
+        d = d_inf->owner;
+        printk("%3d: %d has=%c ", loop++, d->domain,
+               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+        at_dump_runq_el(d);
+        printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
+        printk("         l: %lx n: %lx  p: %lx\n",
+               (unsigned long)list, (unsigned long)list->next,
+               (unsigned long)list->prev);
+    }
+
+
+    queue = WAITQ(cpu);
+    printk("\nWAITQUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
+           (unsigned long) queue->next, (unsigned long) queue->prev);
+
+    list_for_each ( list, queue )
+    {
+        d_inf = list_entry(list, struct at_dom_info, waitq);
+        d = d_inf->owner;
+        printk("%3d: %d has=%c ", loop++, d->domain,
+               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+        at_dump_runq_el(d);
+        printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
+        printk("         l: %lx n: %lx  p: %lx\n",
+               (unsigned long)list, (unsigned long)list->next,
+               (unsigned long)list->prev);
+    }
+
+}
+
 /* set or fetch domain scheduling parameters */
 static int at_adjdom(struct domain *p, struct sched_adjdom_cmd *cmd)
 {
@@ -585,22 +682,6 @@ static int at_adjdom(struct domain *p, s
     return 0;
 }
 
-
-/** at_alloc_task - allocate private info for a task */
-static int at_alloc_task(struct domain *p)
-{
-    ASSERT(p != NULL);
-
-    p->sched_priv = xmem_cache_alloc(dom_info_cache);
-    if( p->sched_priv == NULL )
-        return -1;
-
-    memset(p->sched_priv, 0, sizeof(struct at_dom_info));
-
-    return 0;
-}
-
-
 /* free memory associated with a task */
 static void at_free_task(struct domain *p)
 {
@@ -627,23 +708,20 @@ static int at_prn_state(int state)
 
     return ret;
 }
-
-#endif /* KAF_KILLED */
 
 struct scheduler sched_atropos_def = {
     .name     = "Atropos Soft Real Time Scheduler",
     .opt_name = "atropos",
     .sched_id = SCHED_ATROPOS,
-#ifdef KAF_KILLED
     .init_scheduler = at_init_scheduler,
+    .init_idle_task = at_init_idle_task,
    .alloc_task     = at_alloc_task,
     .add_task       = at_add_task,
     .free_task      = at_free_task,
-    .wake_up        = unblock,
+    .wake           = unblock,
+    .sleep          = block,
     .do_schedule    = ksched_scheduler,
     .adjdom         = at_adjdom,
     .dump_cpu_state = at_dump_cpu_state,
-    .dump_runq_el   = at_dump_runq_el,
     .prn_state      = at_prn_state,
-#endif /* KAF_KILLED */
 };
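The bulk of this changeset revives Atropos against the new scheduler interface: scheduler state moves from struct domain into struct at_dom_info, each CPU gets its own run and wait queues, and the run queue now links at_dom_info structures directly through an embedded list_head, recovered with list_entry(). A self-contained userspace sketch of that embedded-list pattern; the list helpers here are simplified re-implementations, not the Xen ones:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct at_dom_info {
        int deadline;               /* sort key on the run queue */
        struct list_head run_list;  /* embedded queue linkage    */
    };

    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    int main(void)
    {
        struct list_head runq = LIST_HEAD_INIT(runq);
        struct at_dom_info a = { .deadline = 5 }, b = { .deadline = 9 };

        list_add_tail(&a.run_list, &runq);
        list_add_tail(&b.run_list, &runq);

        /* Walk the queue, recovering each at_dom_info from its run_list,
           in the same way the new requeue()/ksched_scheduler() code does. */
        for (struct list_head *p = runq.next; p != &runq; p = p->next) {
            struct at_dom_info *inf =
                list_entry(p, struct at_dom_info, run_list);
            printf("deadline %d\n", inf->deadline);
        }
        return 0;
    }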
--- a/xen/common/slab.c	Fri Sep 24 13:42:42 2004 +0000
+++ b/xen/common/slab.c	Fri Sep 24 13:43:30 2004 +0000
@@ -321,6 +321,8 @@ static cache_sizes_t cache_sizes[] = {
     { 4096, NULL},
     { 8192, NULL},
     { 16384, NULL},
+    { 32768, NULL},
+    { 65536, NULL},
     { 0, NULL}
 };
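The two new entries extend the general-purpose caches to 32 KB and 64 KB. Assuming these caches back variable-size allocations the way they do in the Linux slab allocator this file derives from (an inference, not stated in the diff), a request is served from the smallest cache_sizes entry that fits, so anything above 16 KB previously had no backing cache. A toy illustration:

    #include <stdio.h>

    static const unsigned long cache_sizes[] =
        { 4096, 8192, 16384, 32768, 65536, 0 };

    /* Pick the smallest general cache that can hold the request. */
    static unsigned long pick_cache(unsigned long bytes)
    {
        for (int i = 0; cache_sizes[i] != 0; i++)
            if (bytes <= cache_sizes[i])
                return cache_sizes[i];
        return 0;  /* no general cache is large enough */
    }

    int main(void)
    {
        printf("20000 -> %lu\n", pick_cache(20000)); /* now satisfiable: 32768 */
        printf("70000 -> %lu\n", pick_cache(70000)); /* still unsatisfiable    */
        return 0;
    }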
--- a/xen/include/asm-x86/mm.h	Fri Sep 24 13:42:42 2004 +0000
+++ b/xen/include/asm-x86/mm.h	Fri Sep 24 13:43:30 2004 +0000
@@ -116,6 +116,8 @@ struct pfn_info
         spin_unlock(&(_dom)->page_alloc_lock);                             \
     } while ( 0 )
 
+#define INVALID_P2M_ENTRY (~0UL)
+
 extern struct pfn_info *frame_table;
 extern unsigned long frame_table_size;
 extern unsigned long max_page;