direct-io.hg
changeset 883:6cde5e25c56f
bitkeeper revision 1.554 (3fa7a01284xyjU8eM36NSWyFCKOIsQ)
Many files:
More support in the xenolinux guest os for suspending itself.
author   | kaf24@scramble.cl.cam.ac.uk
date     | Tue Nov 04 12:48:18 2003 +0000 (2003-11-04)
parents  | bb30fa014b9d
children | 615324cd0f00
files    | xen/drivers/block/xen_block.c xen/include/hypervisor-ifs/network.h xen/include/hypervisor-ifs/vbd.h xen/net/dev.c xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c xenolinux-2.4.22-sparse/arch/xeno/mm/init.c xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h
line diff
--- a/xen/drivers/block/xen_block.c	Mon Nov 03 17:18:05 2003 +0000
+++ b/xen/drivers/block/xen_block.c	Tue Nov 04 12:48:18 2003 +0000
@@ -246,6 +246,7 @@ long do_block_io_op(block_io_op_t *u_blo
 {
     long ret = 0;
     block_io_op_t op;
+    struct task_struct *p = current;
 
     if (copy_from_user(&op, u_block_io_op, sizeof(op)))
         return -EFAULT;
@@ -254,17 +255,32 @@ long do_block_io_op(block_io_op_t *u_blo
 
     case BLOCK_IO_OP_SIGNAL:
         /* simply indicates there're reqs outstanding => add current to list */
-        add_to_blkdev_list_tail(current);
+        add_to_blkdev_list_tail(p);
         maybe_trigger_io_schedule();
         break;
 
     case BLOCK_IO_OP_ATTACH_VBD:
         /* attach a VBD to a given domain; caller must be privileged */
-        if(!IS_PRIV(current))
+        if( !IS_PRIV(p) )
            return -EPERM;
        ret = vbd_attach(&op.u.attach_info);
        break;
 
+    case BLOCK_IO_OP_RESET:
+        /* Avoid a race with the tasklet. */
+        remove_from_blkdev_list(p);
+        if ( p->blk_req_cons != p->blk_resp_prod )
+        {
+            /* Interface isn't quiescent. */
+            ret = -EINVAL;
+        }
+        else
+        {
+            p->blk_req_cons = p->blk_resp_prod = 0;
+            ret = 0;
+        }
+        break;
+
     default:
         ret = -ENOSYS;
     }
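The new BLOCK_IO_OP_RESET case only rewinds the ring indexes when the interface is quiescent, i.e. when the backend's request consumer has caught up with its response producer. A minimal user-space sketch of that check follows; the blk_ring_state structure and reset_ring() helper are illustrative stand-ins for the per-domain fields touched in the hunk above, not Xen code.

/*
 * Sketch of the quiescence test behind BLOCK_IO_OP_RESET: indexes may
 * only be zeroed once every request produced by the guest has been
 * answered.  The struct below is a hypothetical stand-in.
 */
#include <stdio.h>
#include <errno.h>

struct blk_ring_state {
    unsigned int req_cons;   /* requests consumed by the backend */
    unsigned int resp_prod;  /* responses produced for the guest */
};

/* Returns 0 on success, -EINVAL if the interface is not quiescent. */
static int reset_ring(struct blk_ring_state *s)
{
    if (s->req_cons != s->resp_prod)
        return -EINVAL;               /* work still in flight */
    s->req_cons = s->resp_prod = 0;
    return 0;
}

int main(void)
{
    struct blk_ring_state busy = { .req_cons = 7, .resp_prod = 5 };
    struct blk_ring_state idle = { .req_cons = 5, .resp_prod = 5 };

    printf("busy ring: %d\n", reset_ring(&busy));  /* prints -22 (-EINVAL) */
    printf("idle ring: %d\n", reset_ring(&idle));  /* prints 0 */
    return 0;
}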
--- a/xen/include/hypervisor-ifs/network.h	Mon Nov 03 17:18:05 2003 +0000
+++ b/xen/include/hypervisor-ifs/network.h	Tue Nov 04 12:48:18 2003 +0000
@@ -18,7 +18,7 @@
 
 #define NETOP_PUSH_BUFFERS  0 /* Notify Xen of new buffers on the rings. */
 #define NETOP_FLUSH_BUFFERS 1 /* Flush all pending request buffers. */
-
+#define NETOP_RESET_RINGS   2 /* Reset ring indexes on a quiescent vif. */
 
 typedef struct tx_req_entry_st
 {
--- a/xen/include/hypervisor-ifs/vbd.h	Mon Nov 03 17:18:05 2003 +0000
+++ b/xen/include/hypervisor-ifs/vbd.h	Tue Nov 04 12:48:18 2003 +0000
@@ -78,9 +78,9 @@ typedef struct xen_vbd_info
 /* Block I/O trap operations and associated structures.
  */
 
-#define BLOCK_IO_OP_SIGNAL      0    // let xen know we have work to do
-#define BLOCK_IO_OP_ATTACH_VBD  1    // attach a VBD to a given domain
-
+#define BLOCK_IO_OP_SIGNAL      0    /* let xen know we have work to do */
+#define BLOCK_IO_OP_ATTACH_VBD  1    /* attach a VBD to a given domain */
+#define BLOCK_IO_OP_RESET       2    /* reset ring indexes on quiescent i/f */
 
 typedef struct _extent {
     u16 raw_device;
@@ -91,10 +91,10 @@ typedef struct _extent {
 
 typedef struct _vbd_attach {
     int domain;
-    u16 mode;            // read-only or read-write
-    u16 device;          // how this domain refers to this VBD
-    int nr_extents;      // number of extents in the VBD
-    extent_t *extents;   // pointer to /array/ of extents
+    u16 mode;            /* read-only or read-write */
+    u16 device;          /* how this domain refers to this VBD */
+    int nr_extents;      /* number of extents in the VBD */
+    extent_t *extents;   /* pointer to /array/ of extents */
 } vbd_attach_t;
 
 
@@ -103,8 +103,9 @@ typedef struct block_io_op_st
     unsigned long cmd;
     union
     {
-        long signal_val_unused;
+        /* no entry for BLOCK_IO_OP_SIGNAL */
         vbd_attach_t attach_info;
+        /* no entry for BLOCK_IO_OP_RESET */
     }
     u;
 } block_io_op_t;
--- a/xen/net/dev.c	Mon Nov 03 17:18:05 2003 +0000
+++ b/xen/net/dev.c	Tue Nov 04 12:48:18 2003 +0000
@@ -2229,6 +2229,24 @@ long do_net_io_op(unsigned int op, unsig
         ret = flush_bufs_for_vif(vif);
         break;
 
+    case NETOP_RESET_RINGS:
+        /* We take the tx_lock to avoid a race with get_tx_bufs. */
+        spin_lock_irq(&vif->tx_lock);
+        if ( (vif->rx_req_cons != vif->rx_resp_prod) ||
+             (vif->tx_req_cons != vif->tx_resp_prod) )
+        {
+            /* The interface isn't quiescent. */
+            ret = -EINVAL;
+        }
+        else
+        {
+            vif->rx_req_cons = vif->rx_resp_prod = 0;
+            vif->tx_req_cons = vif->tx_resp_prod = 0;
+            ret = 0;
+        }
+        spin_unlock_irq(&vif->tx_lock);
+        break;
+
     default:
         ret = -EINVAL;
         break;
--- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c	Mon Nov 03 17:18:05 2003 +0000
+++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c	Tue Nov 04 12:48:18 2003 +0000
@@ -14,13 +14,20 @@ typedef unsigned char byte; /* from linu
 #define XLBLK_RESPONSE_IRQ _EVENT_BLKDEV
 #define DEBUG_IRQ          _EVENT_DEBUG
 
+#define STATE_ACTIVE    0
+#define STATE_SUSPENDED 1
+#define STATE_CLOSED    2
+static unsigned int state = STATE_SUSPENDED;
+
 static blk_ring_t *blk_ring;
 static unsigned int resp_cons; /* Response consumer for comms ring. */
 static unsigned int req_prod;  /* Private request producer. */
 static xen_disk_info_t xlblk_disk_info;
 static int xlblk_control_msg_pending;
 
-#define RING_FULL (BLK_RING_INC(req_prod) == resp_cons)
+/* We plug the I/O ring if the driver is suspended or if the ring is full. */
+#define RING_PLUGGED ((BLK_RING_INC(req_prod) == resp_cons) || \
+                      (state != STATE_ACTIVE))
 
 /*
  * Request queues with outstanding work, but ring is currently full.
@@ -338,6 +345,9 @@ static int hypervisor_request(unsigned l
     if ( nr_sectors >= (1<<9) ) BUG();
     if ( (buffer_ma & ((1<<9)-1)) != 0 ) BUG();
 
+    if ( state == STATE_CLOSED )
+        return 1;
+
     switch ( operation )
     {
     case XEN_BLOCK_VBD_CREATE:
@@ -345,7 +355,7 @@ static int hypervisor_request(unsigned l
     case XEN_BLOCK_PHYSDEV_GRANT:
     case XEN_BLOCK_PHYSDEV_PROBE:
     case XEN_BLOCK_PROBE:
-        if ( RING_FULL ) return 1;
+        if ( RING_PLUGGED ) return 1;
         phys_device = (kdev_t) 0;
         sector_number = 0;
         DISABLE_SCATTERGATHER();
@@ -372,7 +382,7 @@ static int hypervisor_request(unsigned l
             DISABLE_SCATTERGATHER();
             return 0;
         }
-        else if ( RING_FULL )
+        else if ( RING_PLUGGED )
         {
             return 1;
         }
@@ -485,6 +495,9 @@ static void xlblk_response_int(int irq,
     int i;
     unsigned long flags;
     struct buffer_head *bh, *next_bh;
+
+    if ( state == STATE_CLOSED )
+        return;
 
     spin_lock_irqsave(&io_request_lock, flags);
 
@@ -534,7 +547,7 @@ static void xlblk_response_int(int irq,
     while ( nr_pending != 0 )
     {
         do_xlblk_request(pending_queues[--nr_pending]);
-        if ( RING_FULL ) break;
+        if ( RING_PLUGGED ) break;
     }
 }
 
@@ -569,17 +582,32 @@ int xenolinux_control_msg(int operation,
 }
 
 
+static void reset_xlblk_interface(void)
+{
+    block_io_op_t op;
+
+    xlblk_control_msg_pending = 0;
+    nr_pending = 0;
+
+    op.cmd = BLOCK_IO_OP_RESET;
+    if ( HYPERVISOR_block_io_op(&op) != 0 )
+        printk(KERN_ALERT "Possible blkdev trouble: couldn't reset ring\n");
+
+    set_fixmap(FIX_BLKRING_BASE, start_info.blk_ring);
+    blk_ring = (blk_ring_t *)fix_to_virt(FIX_BLKRING_BASE);
+    blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
+
+    wmb();
+    state = STATE_ACTIVE;
+}
+
+
 int __init xlblk_init(void)
 {
     int error;
 
-    xlblk_control_msg_pending = 0;
-    nr_pending = 0;
+    reset_xlblk_interface();
 
-    /* This mapping was created early at boot time. */
-    blk_ring = (blk_ring_t *)fix_to_virt(FIX_BLKRING_BASE);
-    blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
-
     error = request_irq(XLBLK_RESPONSE_IRQ, xlblk_response_int,
                         SA_SAMPLE_RANDOM, "blkdev", NULL);
     if ( error )
@@ -639,3 +667,29 @@ static void __exit xlblk_cleanup(void)
 module_init(xlblk_init);
 module_exit(xlblk_cleanup);
 #endif
+
+
+void blkdev_suspend(void)
+{
+    state = STATE_SUSPENDED;
+    wmb();
+
+    while ( resp_cons != blk_ring->req_prod )
+    {
+        barrier();
+        current->state = TASK_INTERRUPTIBLE;
+        schedule_timeout(1);
+    }
+
+    wmb();
+    state = STATE_CLOSED;
+    wmb();
+
+    clear_fixmap(FIX_BLKRING_BASE);
+}
+
+
+void blkdev_resume(void)
+{
+    reset_xlblk_interface();
+}
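The RING_PLUGGED macro folds the driver state into the old ring-full test, which compares an incremented private producer against the response consumer. The sketch below works through that occupancy test on a toy ring; BLK_RING_SIZE and BLK_RING_INC are assumptions chosen to match the usual power-of-two wrap-around, not the actual XenoLinux definitions.

/*
 * Toy model of the ring-full test that RING_PLUGGED builds on.  The
 * ring is "full" when advancing the private producer would collide
 * with the consumer, which deliberately leaves one slot unused so
 * that full and empty remain distinguishable.
 */
#include <stdio.h>

#define BLK_RING_SIZE   8                              /* assumed */
#define BLK_RING_INC(i) (((i) + 1) & (BLK_RING_SIZE - 1))

int main(void)
{
    unsigned int req_prod = 0, resp_cons = 0;
    int pushed = 0;

    /* Keep producing until the full test trips. */
    while (BLK_RING_INC(req_prod) != resp_cons) {
        req_prod = BLK_RING_INC(req_prod);
        pushed++;
    }

    /* Only BLK_RING_SIZE - 1 requests fit before the ring plugs. */
    printf("ring plugged after %d requests\n", pushed);
    return 0;
}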
--- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c	Mon Nov 03 17:18:05 2003 +0000
+++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c	Tue Nov 04 12:48:18 2003 +0000
@@ -44,12 +44,6 @@ static void cleanup_module(void);
 
 static struct list_head dev_list;
 
-/*
- * Needed because network_close() is not properly implemented yet. So
- * an open after a close needs to do much less than the initial open.
- */
-static int opened_once_already = 0;
-
 struct net_private
 {
     struct list_head list;
@@ -58,6 +52,7 @@ struct net_private
     struct net_device_stats stats;
     atomic_t tx_entries;
     unsigned int rx_resp_cons, tx_resp_cons, tx_full;
+    unsigned int net_ring_fixmap_idx;
     net_ring_t *net_ring;
     net_idx_t *net_idx;
     spinlock_t tx_lock;
@@ -65,6 +60,11 @@ struct net_private
 
     unsigned int rx_bufs_to_notify;
 
+#define STATE_ACTIVE    0
+#define STATE_SUSPENDED 1
+#define STATE_CLOSED    2
+    unsigned int state;
+
     /*
      * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
      * array is an index into a chain of free entries.
@@ -103,14 +103,16 @@ static void dbg_network_int(int irq, voi
 static int network_open(struct net_device *dev)
 {
     struct net_private *np = dev->priv;
-    int i, error = 0;
+    int i;
+
+    if ( HYPERVISOR_net_io_op(NETOP_RESET_RINGS, np->idx) != 0 )
+        printk(KERN_ALERT "Possible net trouble: couldn't reset ring idxs\n");
 
-    if ( opened_once_already )
-    {
-        memset(&np->stats, 0, sizeof(np->stats));
-        netif_start_queue(dev);
-        return 0;
-    }
+    set_fixmap(FIX_NETRING0_BASE + np->net_ring_fixmap_idx,
+               start_info.net_rings[np->idx]);
+    np->net_ring = (net_ring_t *)fix_to_virt(
+        FIX_NETRING0_BASE + np->net_ring_fixmap_idx);
+    np->net_idx = &HYPERVISOR_shared_info->net_idx[np->idx];
 
     np->rx_bufs_to_notify = 0;
     np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
@@ -126,38 +128,16 @@ static int network_open(struct net_devic
     for ( i = 0; i < RX_RING_SIZE; i++ )
         np->rx_skbs[i] = (void *)(i+1);
 
-    error = request_irq(NET_IRQ, network_interrupt,
-                        SA_SAMPLE_RANDOM, "network", dev);
-    if ( error )
-    {
-        printk(KERN_WARNING "%s: Could not allocate network interrupt\n",
-               dev->name);
-        goto fail;
-    }
-
-    error = request_irq(_EVENT_DEBUG, dbg_network_int, SA_SHIRQ,
-                        "debug", dev);
-    if ( error )
-    {
-        printk(KERN_WARNING "%s: Non-fatal error -- no debug interrupt\n",
-               dev->name);
-    }
+    wmb();
+    np->state = STATE_ACTIVE;
 
     network_alloc_rx_buffers(dev);
 
-    printk("XenoLinux Virtual Network Driver installed as %s\n", dev->name);
-
     netif_start_queue(dev);
 
     MOD_INC_USE_COUNT;
 
-    opened_once_already = 1;
-
     return 0;
-
- fail:
-    kfree(np);
-    return error;
 }
 
 
@@ -192,7 +172,8 @@ static void network_tx_buf_gc(struct net
     if ( np->tx_full && (atomic_read(&np->tx_entries) < TX_MAX_ENTRIES) )
     {
         np->tx_full = 0;
-        netif_wake_queue(dev);
+        if ( np->state == STATE_ACTIVE )
+            netif_wake_queue(dev);
     }
 }
 
@@ -214,7 +195,8 @@ static void network_alloc_rx_buffers(str
     struct sk_buff *skb;
     unsigned int end = RX_RING_ADD(np->rx_resp_cons, RX_MAX_ENTRIES);
 
-    if ( (i = np->net_idx->rx_req_prod) == end )
+    if ( ((i = np->net_idx->rx_req_prod) == end) ||
+         (np->state != STATE_ACTIVE) )
         return;
 
     do {
@@ -312,14 +294,16 @@ static int network_start_xmit(struct sk_
 }
 
 
-static void network_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
+static inline void _network_interrupt(struct net_device *dev)
 {
+    struct net_private *np = dev->priv;
     unsigned int i;
     unsigned long flags;
-    struct net_device *dev = (struct net_device *)dev_id;
-    struct net_private *np = dev->priv;
     struct sk_buff *skb;
     rx_resp_entry_t *rx;
+
+    if ( np->state == STATE_CLOSED )
+        return;
 
     spin_lock_irqsave(&np->tx_lock, flags);
     network_tx_buf_gc(dev);
@@ -375,9 +359,46 @@ static void network_interrupt(int irq, v
 }
 
 
+static void network_interrupt(int irq, void *unused, struct pt_regs *ptregs)
+{
+    struct list_head *ent;
+    struct net_private *np;
+    list_for_each ( ent, &dev_list )
+    {
+        np = list_entry(ent, struct net_private, list);
+        _network_interrupt(np->dev);
+    }
+}
+
+
 int network_close(struct net_device *dev)
 {
-    netif_stop_queue(dev);
+    struct net_private *np = dev->priv;
+
+    np->state = STATE_SUSPENDED;
+    wmb();
+
+    netif_stop_queue(np->dev);
+
+    HYPERVISOR_net_io_op(NETOP_FLUSH_BUFFERS, np->idx);
+
+    while ( (np->rx_resp_cons != np->net_idx->rx_req_prod) ||
+            (np->tx_resp_cons != np->net_idx->tx_req_prod) )
+    {
+        barrier();
+        current->state = TASK_INTERRUPTIBLE;
+        schedule_timeout(1);
+    }
+
+    wmb();
+    np->state = STATE_CLOSED;
+    wmb();
+
+    /* Now no longer safe to take interrupts for this device. */
+    clear_fixmap(FIX_NETRING0_BASE + np->net_ring_fixmap_idx);
+
+    MOD_DEC_USE_COUNT;
+
     return 0;
 }
 
@@ -471,6 +492,18 @@ int __init init_module(void)
     if ( start_info.dom_id == 0 )
         (void)register_inetaddr_notifier(&notifier_inetdev);
 
+    err = request_irq(NET_IRQ, network_interrupt,
+                      SA_SAMPLE_RANDOM, "network", NULL);
+    if ( err )
+    {
+        printk(KERN_WARNING "Could not allocate network interrupt\n");
+        goto fail;
+    }
+
+    err = request_irq(_EVENT_DEBUG, dbg_network_int, SA_SHIRQ, "debug", NULL);
+    if ( err )
+        printk(KERN_WARNING "Non-fatal error -- no debug interrupt\n");
+
     for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
     {
         if ( start_info.net_rings[i] == 0 )
@@ -487,12 +520,10 @@ int __init init_module(void)
             goto fail;
         }
 
-        set_fixmap(FIX_NETRING0_BASE+fixmap_idx, start_info.net_rings[i]);
-
         np = dev->priv;
-        np->net_ring = (net_ring_t *)fix_to_virt(FIX_NETRING0_BASE+fixmap_idx);
-        np->net_idx = &HYPERVISOR_shared_info->net_idx[i];
-        np->idx = i;
+        np->state = STATE_CLOSED;
+        np->net_ring_fixmap_idx = fixmap_idx;
+        np->idx = i;
 
         SET_MODULE_OWNER(dev);
         dev->open = network_open;
--- a/xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c	Mon Nov 03 17:18:05 2003 +0000
+++ b/xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c	Tue Nov 04 12:48:18 2003 +0000
@@ -45,6 +45,8 @@
 #include <asm/mmu_context.h>
 #include <asm/hypervisor.h>
 #include <asm/hypervisor-ifs/dom0_ops.h>
+#include <linux/netdevice.h>
+#include <linux/tqueue.h>
 
 /*
  * Point at the empty zero page to start with. We map the real shared_info
@@ -1035,33 +1037,82 @@ void __init cpu_init (void)
  * Time-to-die callback handling.
  */
 
-static void time_to_die(int irq, void *unused, struct pt_regs *regs)
+static void die_irq(int irq, void *unused, struct pt_regs *regs)
 {
     extern void ctrl_alt_del(void);
     ctrl_alt_del();
 }
 
-static int __init setup_death_event(void)
+static int __init setup_die_event(void)
 {
-    (void)request_irq(_EVENT_DIE, time_to_die, 0, "die", NULL);
+    (void)request_irq(_EVENT_DIE, die_irq, 0, "die", NULL);
     return 0;
 }
 
-__initcall(setup_death_event);
+__initcall(setup_die_event);
 
 
 /******************************************************************************
  * Stop/pickle callback handling.
  */
 
-static void time_to_stop(int irq, void *unused, struct pt_regs *regs)
+static void stop_task(void *unused)
 {
+    /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
+    extern void blkdev_suspend(void);
+    extern void blkdev_resume(void);
+
+    struct net_device *dev;
+    char name[6];
+    int i;
+
+    /* Close down all Ethernet interfaces. */
+    for ( i = 0; i < 10; i++ )
+    {
+        sprintf(name, "eth%d", i);
+        if ( (dev = dev_get_by_name(name)) == NULL )
+            continue;
+        dev_close(dev);
+        dev_put(dev);
+    }
+
+    blkdev_suspend();
+
+    __cli();
+
+    clear_fixmap(FIX_SHARED_INFO);
+
     HYPERVISOR_stop();
+
+    set_fixmap(FIX_SHARED_INFO, start_info.shared_info);
+    HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+
+    __sti();
+
+    blkdev_resume();
+
+    /* Bring up all Ethernet interfaces. */
+    for ( i = 0; i < 10; i++ )
+    {
+        sprintf(name, "eth%d", i);
+        if ( (dev = dev_get_by_name(name)) == NULL )
+            continue;
+        dev_open(dev);
+        dev_put(dev);
+    }
+}
+
+static struct tq_struct stop_tq;
+
+static void stop_irq(int irq, void *unused, struct pt_regs *regs)
+{
+    stop_tq.routine = stop_task;
+    schedule_task(&stop_tq);
 }
 
 static int __init setup_stop_event(void)
 {
-    (void)request_irq(_EVENT_STOP, time_to_stop, 0, "stop", NULL);
+    (void)request_irq(_EVENT_STOP, stop_irq, 0, "stop", NULL);
     return 0;
 }
 
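The stop event is now handled in two stages: stop_irq() runs in interrupt context and merely queues stop_tq, while stop_task() later performs the heavyweight close/suspend/resume sequence from process context via schedule_task(). The following user-space analogue shows the same "flag it in the handler, do the work later" pattern; the SIGUSR1 trigger and all names are illustrative only, not the kernel API.

/*
 * User-space analogue of the stop_irq/stop_task split: the asynchronous
 * handler only records that work is pending, and the heavyweight
 * sequence runs afterwards in ordinary (process) context.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t stop_pending;

static void stop_handler(int sig)     /* analogue of stop_irq() */
{
    (void)sig;
    stop_pending = 1;                 /* just flag the work */
}

static void stop_work(void)           /* analogue of stop_task() */
{
    printf("closing interfaces, suspending, resuming...\n");
}

int main(void)
{
    signal(SIGUSR1, stop_handler);
    raise(SIGUSR1);                   /* simulate the stop upcall */

    for (;;) {                        /* ordinary process-context loop */
        if (stop_pending) {
            stop_pending = 0;
            stop_work();
            break;
        }
        pause();
    }
    return 0;
}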
--- a/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c	Mon Nov 03 17:18:05 2003 +0000
+++ b/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c	Tue Nov 04 12:48:18 2003 +0000
@@ -95,9 +95,8 @@ extern char _text, _etext, _edata, __bss
 extern char __init_begin, __init_end;
 
 static inline void set_pte_phys (unsigned long vaddr,
-                                 unsigned long phys, pgprot_t flags)
+                                 unsigned long phys, pgprot_t prot)
 {
-    pgprot_t prot;
     pgd_t *pgd;
     pmd_t *pmd;
     pte_t *pte;
@@ -117,8 +116,6 @@ static inline void set_pte_phys (unsigne
     if (pte_val(*pte))
         pte_ERROR(*pte);
 
-    pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
-
     /* We queue directly, avoiding hidden phys->machine translation. */
     queue_l1_entry_update(pte, phys | pgprot_val(prot));
 
@@ -129,8 +126,8 @@ static inline void set_pte_phys (unsigne
     __flush_tlb_one(vaddr);
 }
 
-void __set_fixmap (enum fixed_addresses idx, unsigned long phys,
-                   pgprot_t flags)
+void __set_fixmap(enum fixed_addresses idx, unsigned long phys,
+                  pgprot_t flags)
 {
     unsigned long address = __fix_to_virt(idx);
 
@@ -138,7 +135,13 @@ void __set_fixmap (enum fixed_addresses
         printk("Invalid __set_fixmap\n");
         return;
     }
-    set_pte_phys(address, phys, flags);
+    set_pte_phys(address, phys,
+                 __pgprot(pgprot_val(PAGE_KERNEL)|pgprot_val(flags)));
+}
+
+void clear_fixmap(enum fixed_addresses idx)
+{
+    set_pte_phys(__fix_to_virt(idx), 0, __pgprot(0));
 }
 
 static void __init fixrange_init (unsigned long start,
@@ -229,9 +232,6 @@ void __init paging_init(void)
     vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
     fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);
 
-    /* Cheesy: this can probably be moved to the blkdev driver. */
-    set_fixmap(FIX_BLKRING_BASE, start_info.blk_ring);
-
     /* Switch to the real shared_info page, and clear the dummy page. */
     set_fixmap(FIX_SHARED_INFO, start_info.shared_info);
     HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
--- a/xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h	Mon Nov 03 17:18:05 2003 +0000
+++ b/xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h	Tue Nov 04 12:48:18 2003 +0000
@@ -72,6 +72,9 @@ extern void __set_fixmap (enum fixed_add
  */
 #define set_fixmap_nocache(idx, phys) \
         __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+extern void clear_fixmap(enum fixed_addresses idx);
+
 /*
  * used by vmalloc.c.
  *
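clear_fixmap() is the new counterpart to set_fixmap(): the fixed virtual slot stays reserved, but its mapping is revoked while the guest is suspended, then re-established on resume. As a rough user-space analogue (and only an analogue, since the real interface edits page-table entries directly rather than calling mmap), the sketch below reserves a virtual window, maps data into it, and then revokes access while keeping the address reserved.

/*
 * User-space analogue of the fixmap pattern: reserve a fixed virtual
 * window once, make it usable ("set_fixmap"), and later revoke access
 * while keeping the address reserved ("clear_fixmap", approximated
 * here with PROT_NONE).  Purely illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;

    /* Reserve the fixed virtual slot, initially inaccessible. */
    void *slot = mmap(NULL, len, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (slot == MAP_FAILED) { perror("mmap"); return 1; }

    /* "set_fixmap": make the slot usable and put data behind it. */
    if (mprotect(slot, len, PROT_READ | PROT_WRITE) != 0) {
        perror("mprotect"); return 1;
    }
    strcpy(slot, "shared ring lives here");
    printf("%p: %s\n", slot, (char *)slot);

    /* "clear_fixmap": revoke access but keep the address reserved. */
    if (mprotect(slot, len, PROT_NONE) != 0) { perror("mprotect"); return 1; }

    munmap(slot, len);
    return 0;
}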