ia64/xen-unstable
changeset 1127:9ed81ff882d4
bitkeeper revision 1.749.1.1 (403cd19e9cL26IazEdGTvx0tHxbYqw)
event_channel.h:
  new file
sched.h, hypervisor-if.h, event_channel.c, domain.c, apic.c:
  Rewritten event-channel code.
author   kaf24@scramble.cl.cam.ac.uk
date     Wed Feb 25 16:47:26 2004 +0000 (2004-02-25)
parents  edb575655020
children 74915dc117d5 6baeead2cccd
files    .rootkeys xen/arch/i386/apic.c xen/common/domain.c xen/common/event_channel.c xen/include/hypervisor-ifs/event_channel.h xen/include/hypervisor-ifs/hypervisor-if.h xen/include/xeno/sched.h
--- a/.rootkeys	Wed Feb 25 13:24:01 2004 +0000
+++ b/.rootkeys	Wed Feb 25 16:47:26 2004 +0000
@@ -417,6 +417,7 @@ 400304fcmRQmDdFYEzDh0wcBba9alg xen/inclu
 3ddb79c2YTaZwOqWin9-QNgHge5RVw xen/include/hypervisor-ifs/block.h
 3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h
 3e6377eaioRoNm0m_HSDEAd4Vqrq_w xen/include/hypervisor-ifs/dom_mem_ops.h
+403cd194j2pyLqXD8FJ-ukvZzkPenw xen/include/hypervisor-ifs/event_channel.h
 3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h
 3ead095dE_VF-QA88rl_5cWYRWtRVQ xen/include/hypervisor-ifs/kbd.h
 3ddb79c2oRPrzClk3zbTkRHlpumzKA xen/include/hypervisor-ifs/network.h
--- a/xen/arch/i386/apic.c	Wed Feb 25 13:24:01 2004 +0000
+++ b/xen/arch/i386/apic.c	Wed Feb 25 16:47:26 2004 +0000
@@ -659,7 +659,6 @@ void __init setup_APIC_clocks (void)
  */
 int reprogram_ac_timer(s_time_t timeout)
 {
-    int cpu = smp_processor_id();
     s_time_t now;
     s_time_t expire;
     u64 apic_tmict;
@@ -669,7 +668,8 @@ int reprogram_ac_timer(s_time_t timeout)
      * cause an immediate interrupt). At least this is guaranteed to hold it
      * off for ages (esp. since the clock ticks on bus clock, not cpu clock!).
      */
-    if (timeout == 0) {
+    if ( timeout == 0 )
+    {
         apic_tmict = 0xffffffff;
         goto reprogram;
     }
@@ -677,10 +677,12 @@ int reprogram_ac_timer(s_time_t timeout)
     now = NOW();
     expire = timeout - now; /* value from now */
 
-    if (expire <= 0) {
+    if ( expire <= 0 )
+    {
         Dprintk("APICT[%02d] Timeout in the past 0x%08X%08X > 0x%08X%08X\n",
-                cpu, (u32)(now>>32), (u32)now, (u32)(timeout>>32),(u32)timeout);
-        return 0; /* timeout value in the past */
+                smp_processor_id(), (u32)(now>>32),
+                (u32)now, (u32)(timeout>>32),(u32)timeout);
+        return 0;
     }
 
     /*
@@ -693,12 +695,15 @@ int reprogram_ac_timer(s_time_t timeout)
     /* conversion to bus units */
     apic_tmict = (((u64)bus_scale) * expire)>>18;
 
-    if (apic_tmict >= 0xffffffff) {
-        Dprintk("APICT[%02d] Timeout value too large\n", cpu);
+    if ( apic_tmict >= 0xffffffff )
+    {
+        Dprintk("APICT[%02d] Timeout value too large\n", smp_processor_id());
         apic_tmict = 0xffffffff;
     }
-    if (apic_tmict == 0) {
-        Dprintk("APICT[%02d] timeout value too small\n", cpu);
+
+    if ( apic_tmict == 0 )
+    {
+        Dprintk("APICT[%02d] timeout value too small\n", smp_processor_id());
         return 0;
     }
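The conversion above turns a nanosecond delta into APIC initial-count (bus-clock) units with one multiply and a shift. A worked sketch of that arithmetic, assuming bus_scale is a bus-ticks-per-nanosecond factor carrying 18 fractional bits (as the ">>18" implies); the 100MHz bus figure is illustrative only:

    #include <stdint.h>

    int main(void)
    {
        /* 100MHz bus => 0.1 bus ticks per ns, scaled by 2^18 (~26214). */
        uint32_t bus_scale  = (uint32_t)(0.1 * (1 << 18));
        int64_t  expire     = 1000000; /* 1ms expressed in ns */
        uint64_t apic_tmict = ((uint64_t)bus_scale * expire) >> 18;
        /* apic_tmict is now ~100000: 1ms worth of 100MHz bus ticks. */
        return apic_tmict == 0; /* 0 on success */
    }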
--- a/xen/common/domain.c	Wed Feb 25 13:24:01 2004 +0000
+++ b/xen/common/domain.c	Wed Feb 25 16:47:26 2004 +0000
@@ -127,6 +127,8 @@ void kill_domain_with_errmsg(const char
 
 void __kill_domain(struct task_struct *p)
 {
+    extern void destroy_event_channels(struct task_struct *);
+
     int i;
     struct task_struct **pp;
     unsigned long flags;
@@ -149,6 +151,8 @@ void __kill_domain(struct task_struct *p
     for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
         unlink_net_vif(p->net_vif_list[i]);
 
+    destroy_event_channels(p);
+
     /*
      * Note this means that find_domain_by_id may fail, even when the caller
      * holds a reference to the domain being queried. Take care!
@@ -467,8 +471,6 @@ unsigned int alloc_new_dom_mem(struct ta
 /* Release resources belonging to task @p. */
 void release_task(struct task_struct *p)
 {
-    extern void destroy_event_channels(struct task_struct *);
-
     ASSERT(p->state == TASK_DYING);
     ASSERT(!p->has_cpu);
 
@@ -481,7 +483,6 @@ void release_task(struct task_struct *p)
     destroy_blkdev_info(p);
 
     /* Free all memory associated with this domain. */
-    destroy_event_channels(p);
     free_page((unsigned long)p->mm.perdomain_pt);
     UNSHARE_PFN(virt_to_page(p->shared_info));
     free_all_dom_mem(p);
--- a/xen/common/event_channel.c	Wed Feb 25 13:24:01 2004 +0000
+++ b/xen/common/event_channel.c	Wed Feb 25 16:47:26 2004 +0000
@@ -3,7 +3,7 @@
  *
  * Event channels between domains.
  *
- * Copyright (c) 2003, K A Fraser.
+ * Copyright (c) 2003-2004, K A Fraser.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -22,135 +22,147 @@
 #include <xeno/sched.h>
 #include <xeno/event.h>
 
+#include <hypervisor-ifs/hypervisor-if.h>
+#include <hypervisor-ifs/event_channel.h>
+
 #define MAX_EVENT_CHANNELS 1024
 
-static long event_channel_open(u16 target_dom)
+static int get_free_port(struct task_struct *p)
 {
-    struct task_struct *lp = current, *rp;
-    int                 i, lmax, rmax, lid, rid;
-    event_channel_t    *lchn, *rchn;
-    shared_info_t      *rsi;
+    int max, port;
+    event_channel_t *chn;
+
+    max = p->max_event_channel;
+    chn = p->event_channel;
+
+    for ( port = 0; port < max; port++ )
+        if ( chn[port].state == ECS_FREE )
+            break;
+
+    if ( port == max )
+    {
+        if ( max == MAX_EVENT_CHANNELS )
+            return -ENOSPC;
+
+        max = (max == 0) ? 4 : (max * 2);
+
+        chn = kmalloc(max * sizeof(event_channel_t), GFP_KERNEL);
+        if ( unlikely(chn == NULL) )
+            return -ENOMEM;
+
+        memset(chn, 0, max * sizeof(event_channel_t));
+
+        if ( p->event_channel != NULL )
+        {
+            memcpy(chn, p->event_channel, (max/2) * sizeof(event_channel_t));
+            kfree(p->event_channel);
+        }
+
+        p->event_channel     = chn;
+        p->max_event_channel = max;
+    }
+
+    return port;
+}
+
+static inline unsigned long set_event_pending(struct task_struct *p, int port)
+{
+    if ( !test_and_set_bit(port,    &p->shared_info->event_channel_pend[0]) &&
+         !test_and_set_bit(port>>5, &p->shared_info->event_channel_pend_sel) )
+        return mark_guest_event(p, _EVENT_EVTCHN);
+    return 0;
+}
+
+static inline unsigned long set_event_disc(struct task_struct *p, int port)
+{
+    if ( !test_and_set_bit(port,    &p->shared_info->event_channel_disc[0]) &&
+         !test_and_set_bit(port>>5, &p->shared_info->event_channel_disc_sel) )
+        return mark_guest_event(p, _EVENT_EVTCHN);
+    return 0;
+}
+
+static long event_channel_open(evtchn_open_t *open)
+{
+    struct task_struct *lp, *rp;
+    int                 lport = 0, rport = 0;
     unsigned long       cpu_mask;
+    domid_t             ldom = open->local_dom, rdom = open->remote_dom;
     long                rc = 0;
 
-    rp = find_domain_by_id(target_dom);
+    if ( !IS_PRIV(current) )
+        return -EPERM;
+
+    /* 'local_dom' may be DOMID_SELF. 'remote_dom' cannot be. */
+    if ( ldom == DOMID_SELF )
+        ldom = current->domain;
+
+    /* Event channel must connect distinct domains. */
+    if ( ldom == rdom )
+        return -EINVAL;
 
-    /*
-     * We need locks at both ends to make a connection. We avoid deadlock
-     * by acquiring the locks in address order.
-     */
-    if ( (unsigned long)lp < (unsigned long)rp )
+    if ( ((lp = find_domain_by_id(ldom)) == NULL) ||
+         ((rp = find_domain_by_id(rdom)) == NULL) )
+    {
+        if ( lp != NULL )
+            put_task_struct(lp);
+        return -ESRCH;
+    }
+
+    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
+    if ( ldom < rdom )
     {
         spin_lock(&lp->event_channel_lock);
         spin_lock(&rp->event_channel_lock);
     }
     else
     {
-        if ( likely(rp != NULL) )
-            spin_lock(&rp->event_channel_lock);
+        spin_lock(&rp->event_channel_lock);
         spin_lock(&lp->event_channel_lock);
     }
 
-    lmax = lp->max_event_channel;
-    lchn = lp->event_channel;
-    lid  = -1;
-
-    /*
-     * Find the first unused event channel. Also ensure bo channel already
-     * exists to the specified target domain.
-     */
-    for ( i = 0; i < lmax; i++ )
-    {
-        if ( (lid == -1) && !(lchn[i].flags & ECF_INUSE) )
-        {
-            lid = i;
-        }
-        else if ( unlikely(lchn[i].target_dom == target_dom) )
-        {
-            rc = -EEXIST;
-            goto out;
-        }
-    }
-
-    /* If there is no free slot we need to allocate a bigger channel list. */
-    if ( unlikely(lid == -1) )
+    if ( (lport = get_free_port(lp)) < 0 )
     {
-        /* Reached maximum channel count? */
-        if ( unlikely(lmax == MAX_EVENT_CHANNELS) )
-        {
-            rc = -ENOSPC;
-            goto out;
-        }
-
-        lmax = (lmax == 0) ? 4 : (lmax * 2);
-
-        lchn = kmalloc(lmax * sizeof(event_channel_t), GFP_KERNEL);
-        if ( unlikely(lchn == NULL) )
-        {
-            rc = -ENOMEM;
-            goto out;
-        }
+        rc = lport;
+        goto out;
+    }
 
-        memset(lchn, 0, lmax * sizeof(event_channel_t));
-
-        if ( likely(lp->event_channel != NULL) )
-            kfree(lp->event_channel);
-
-        lp->event_channel     = lchn;
-        lp->max_event_channel = lmax;
+    if ( (rport = get_free_port(rp)) < 0 )
+    {
+        rc = rport;
+        goto out;
     }
 
-    lchn[lid].target_dom = target_dom;
-    lchn[lid].flags      = ECF_INUSE;
+    lp->event_channel[lport].remote_dom  = rp;
+    lp->event_channel[lport].remote_port = (u16)rport;
+    lp->event_channel[lport].state       = ECS_CONNECTED;
 
-    if ( likely(rp != NULL) )
-    {
-        rchn = rp->event_channel;
-        rmax = rp->max_event_channel;
-
-        for ( rid = 0; rid < rmax; rid++ )
-        {
-            if ( (rchn[rid].target_dom == lp->domain) &&
-                 (rchn[rid].flags & ECF_INUSE) )
-            {
-                /*
-                 * The target was awaiting a connection. We make the connection
-                 * and send a connection-made event to the remote end.
-                 */
-                rchn[rid].flags = ECF_INUSE | ECF_CONNECTED | lid;
-                lchn[lid].flags = ECF_INUSE | ECF_CONNECTED | rid;
+    rp->event_channel[rport].remote_dom  = lp;
+    rp->event_channel[rport].remote_port = (u16)lport;
+    rp->event_channel[rport].state       = ECS_CONNECTED;
 
-                rsi = rp->shared_info;
-                if ( !test_and_set_bit(rid,    &rsi->event_channel_pend[0]) &&
-                     !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
-                {
-                    cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
-                    guest_event_notify(cpu_mask);
-                }
-
-                break;
-            }
-        }
-    }
+    cpu_mask  = set_event_pending(lp, lport);
+    cpu_mask |= set_event_pending(rp, rport);
+    guest_event_notify(cpu_mask);
 
  out:
     spin_unlock(&lp->event_channel_lock);
-    if ( rp != NULL )
-    {
-        spin_unlock(&rp->event_channel_lock);
-        put_task_struct(rp);
-    }
+    spin_unlock(&rp->event_channel_lock);
+
+    put_task_struct(lp);
+    put_task_struct(rp);
+
+    open->local_port  = lport;
+    open->remote_port = rport;
 
     return rc;
 }
 
 
-static long event_channel_close(u16 lid)
+static long __event_channel_close(struct task_struct *lp, int lport)
 {
-    struct task_struct *lp = current, *rp = NULL;
+    struct task_struct *rp = NULL;
     event_channel_t    *lchn, *rchn;
-    u16                 rid;
-    shared_info_t      *rsi;
+    int                 rport;
     unsigned long       cpu_mask;
     long                rc = 0;
 
@@ -159,21 +171,21 @@ static long event_channel_close(u16 lid)
 
     lchn = lp->event_channel;
 
-    if ( unlikely(lid >= lp->max_event_channel) ||
-         unlikely(!(lchn[lid].flags & ECF_INUSE)) )
+    if ( (lport < 0) || (lport >= lp->max_event_channel) ||
+         (lchn[lport].state == ECS_FREE) )
     {
         rc = -EINVAL;
         goto out;
     }
 
-    if ( lchn[lid].flags & ECF_CONNECTED )
+    if ( lchn[lport].state == ECS_CONNECTED )
     {
         if ( rp == NULL )
         {
-            rp = find_domain_by_id(lchn[lid].target_dom);
-            ASSERT(rp != NULL);
-
-            if ( (unsigned long)lp < (unsigned long)rp )
+            rp = lchn[lport].remote_dom;
+            get_task_struct(rp);
+
+            if ( lp->domain < rp->domain )
             {
                 spin_lock(&rp->event_channel_lock);
             }
@@ -184,34 +196,39 @@ static long event_channel_close(u16 lid)
                 goto again;
             }
         }
-        else if ( rp->domain != lchn[lid].target_dom )
+        else if ( rp != lchn[lport].remote_dom )
         {
             rc = -EINVAL;
             goto out;
         }
 
-        rchn = rp->event_channel;
-        rid  = lchn[lid].flags & ECF_TARGET_ID;
-        ASSERT(rid < rp->max_event_channel);
-        ASSERT(rchn[rid].flags == (ECF_INUSE | ECF_CONNECTED | lid));
-        ASSERT(rchn[rid].target_dom == lp->domain);
-
-        rchn[rid].flags = ECF_INUSE;
+        rchn  = rp->event_channel;
+        rport = lchn[lport].remote_port;
 
-        rsi = rp->shared_info;
-        if ( !test_and_set_bit(rid,    &rsi->event_channel_disc[0]) &&
-             !test_and_set_bit(rid>>5, &rsi->event_channel_disc_sel) )
-        {
-            cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
-            guest_event_notify(cpu_mask);
-        }
+        if ( rport >= rp->max_event_channel )
+            BUG();
+        if ( rchn[rport].state != ECS_CONNECTED )
+            BUG();
+        if ( rchn[rport].remote_dom != lp )
+            BUG();
+
+        rchn[rport].state       = ECS_ZOMBIE;
+        rchn[rport].remote_dom  = NULL;
+        rchn[rport].remote_port = 0xFFFF;
+
+        cpu_mask  = set_event_disc(lp, lport);
+        cpu_mask |= set_event_disc(rp, rport);
+        guest_event_notify(cpu_mask);
     }
 
-    lchn[lid].target_dom = 0;
-    lchn[lid].flags      = 0;
+    lchn[lport].state       = ECS_FREE;
+    lchn[lport].remote_dom  = NULL;
+    lchn[lport].remote_port = 0xFFFF;
 
  out:
     spin_unlock(&lp->event_channel_lock);
+    put_task_struct(lp);
+
     if ( rp != NULL )
     {
         spin_unlock(&rp->event_channel_lock);
@@ -222,87 +239,135 @@ static long event_channel_close(u16 lid)
 }
 
 
-static long event_channel_send(u16 lid)
+static long event_channel_close(evtchn_close_t *close)
+{
+    struct task_struct *lp;
+    int                 lport = close->local_port;
+    long                rc;
+    domid_t             ldom = close->local_dom;
+
+    if ( ldom == DOMID_SELF )
+        ldom = current->domain;
+    else if ( !IS_PRIV(current) )
+        return -EPERM;
+
+    if ( (lp = find_domain_by_id(ldom)) == NULL )
+        return -ESRCH;
+
+    rc = __event_channel_close(lp, lport);
+
+    put_task_struct(lp);
+    return rc;
+}
+
+
+static long event_channel_send(int lport)
 {
     struct task_struct *lp = current, *rp;
-    u16                 rid, rdom;
-    shared_info_t      *rsi;
+    int                 rport;
     unsigned long       cpu_mask;
 
     spin_lock(&lp->event_channel_lock);
 
-    if ( unlikely(lid >= lp->max_event_channel) ||
-         unlikely(!(lp->event_channel[lid].flags & ECF_CONNECTED)) )
+    if ( unlikely(lport < 0) ||
+         unlikely(lport >= lp->max_event_channel) ||
+         unlikely(lp->event_channel[lport].state != ECS_CONNECTED) )
     {
         spin_unlock(&lp->event_channel_lock);
         return -EINVAL;
     }
 
-    rdom = lp->event_channel[lid].target_dom;
-    rid  = lp->event_channel[lid].flags & ECF_TARGET_ID;
+    rp    = lp->event_channel[lport].remote_dom;
+    rport = lp->event_channel[lport].remote_port;
+
+    get_task_struct(rp);
 
     spin_unlock(&lp->event_channel_lock);
 
-    if ( unlikely(rid >= MAX_EVENT_CHANNELS) ||
-         unlikely ((rp = find_domain_by_id(rdom)) == NULL) )
-        return -EINVAL;
-
-    rsi = rp->shared_info;
-    if ( !test_and_set_bit(rid,    &rsi->event_channel_pend[0]) &&
-         !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
-    {
-        cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
-        guest_event_notify(cpu_mask);
-    }
+    cpu_mask = set_event_pending(rp, rport);
+    guest_event_notify(cpu_mask);
 
     put_task_struct(rp);
+
     return 0;
 }
 
 
-static long event_channel_status(u16 lid)
+static long event_channel_status(evtchn_status_t *status)
 {
-    struct task_struct *lp = current;
+    struct task_struct *lp;
+    domid_t             ldom = status->local_dom;
+    int                 lport = status->local_port;
     event_channel_t    *lchn;
-    long                rc = EVTCHNSTAT_closed;
+
+    if ( ldom == DOMID_SELF )
+        ldom = current->domain;
+    else if ( !IS_PRIV(current) )
+        return -EPERM;
+
+    if ( (lp = find_domain_by_id(ldom)) == NULL )
+        return -ESRCH;
 
     spin_lock(&lp->event_channel_lock);
 
     lchn = lp->event_channel;
 
-    if ( lid < lp->max_event_channel )
+    if ( (lport < 0) || (lport >= lp->max_event_channel) )
+    {
+        spin_unlock(&lp->event_channel_lock);
+        return -EINVAL;
+    }
+
+    switch ( lchn[lport].state )
     {
-        if ( lchn[lid].flags & ECF_CONNECTED )
-            rc = EVTCHNSTAT_connected;
-        else if ( lchn[lid].flags & ECF_INUSE )
-            rc = EVTCHNSTAT_disconnected;
+    case ECS_FREE:
+        status->status = EVTCHNSTAT_closed;
+        break;
+    case ECS_ZOMBIE:
+        status->status = EVTCHNSTAT_disconnected;
+        break;
+    case ECS_CONNECTED:
+        status->status      = EVTCHNSTAT_connected;
+        status->remote_dom  = lchn[lport].remote_dom->domain;
+        status->remote_port = lchn[lport].remote_port;
+        break;
+    default:
+        BUG();
     }
 
     spin_unlock(&lp->event_channel_lock);
-    return rc;
+    return 0;
 }
 
 
-long do_event_channel_op(unsigned int cmd, unsigned int id)
+long do_event_channel_op(evtchn_op_t *uop)
 {
     long rc;
+    evtchn_op_t op;
 
-    switch ( cmd )
+    if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
+        return -EFAULT;
+
+    switch ( op.cmd )
     {
     case EVTCHNOP_open:
-        rc = event_channel_open((u16)id);
+        rc = event_channel_open(&op.u.open);
+        if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
+            rc = -EFAULT; /* Cleaning up here would be a mess! */
         break;
 
     case EVTCHNOP_close:
-        rc = event_channel_close((u16)id);
+        rc = event_channel_close(&op.u.close);
         break;
 
     case EVTCHNOP_send:
-        rc = event_channel_send((u16)id);
+        rc = event_channel_send(op.u.send.local_port);
         break;
 
     case EVTCHNOP_status:
-        rc = event_channel_status((u16)id);
+        rc = event_channel_status(&op.u.status);
        if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
            rc = -EFAULT;
         break;
 
     default:
@@ -320,7 +385,7 @@ void destroy_event_channels(struct task_
     if ( p->event_channel != NULL )
     {
         for ( i = 0; i < p->max_event_channel; i++ )
-            (void)event_channel_close((u16)i);
+            (void)__event_channel_close(p, i);
         kfree(p->event_channel);
     }
 }
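The rewritten event_channel_open() drops the old acquire-in-address-order scheme in favour of locking by ascending domain id. A minimal sketch of the idiom, with an illustrative function name that is not part of the changeset:

    /* Take both per-domain event-channel locks without risk of deadlock:
     * every path acquires them in the same global order (ascending domain
     * id), so two CPUs opening channels between the same pair of domains
     * can never each hold one lock while waiting for the other. */
    static void lock_domain_pair(struct task_struct *a, struct task_struct *b)
    {
        if ( a->domain < b->domain )
        {
            spin_lock(&a->event_channel_lock);
            spin_lock(&b->event_channel_lock);
        }
        else
        {
            spin_lock(&b->event_channel_lock);
            spin_lock(&a->event_channel_lock);
        }
    }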
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/include/hypervisor-ifs/event_channel.h	Wed Feb 25 16:47:26 2004 +0000
@@ -0,0 +1,93 @@
+/******************************************************************************
+ * event_channel.h
+ *
+ * Event channels between domains.
+ *
+ * Copyright (c) 2003-2004, K A Fraser.
+ */
+
+#ifndef __HYPERVISOR_IFS__EVENT_CHANNEL_H__
+#define __HYPERVISOR_IFS__EVENT_CHANNEL_H__
+
+/*
+ * EVTCHNOP_open: Open a communication channel between <local_dom> and
+ * <remote_dom>.
+ * NOTES:
+ *  1. <local_dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may create an event channel.
+ *  3. <local_port> and <remote_port> are only supplied if the op succeeds.
+ */
+#define EVTCHNOP_open 0
+typedef struct evtchn_open
+{
+    /* IN parameters. */
+    domid_t local_dom, remote_dom;
+    /* OUT parameters. */
+    int     local_port, remote_port;
+} evtchn_open_t;
+
+/*
+ * EVTCHNOP_close: Close the communication channel which has an endpoint at
+ * <local_dom, local_port>.
+ * NOTES:
+ *  1. <local_dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may close an event channel
+ *     for which <local_dom> is not DOMID_SELF.
+ */
+#define EVTCHNOP_close 1
+typedef struct evtchn_close
+{
+    /* IN parameters. */
+    domid_t local_dom;
+    int     local_port;
+    /* No OUT parameters. */
+} evtchn_close_t;
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <DOMID_SELF, local_port>.
+ */
+#define EVTCHNOP_send 2
+typedef struct evtchn_send
+{
+    /* IN parameters. */
+    int local_port;
+    /* No OUT parameters. */
+} evtchn_send_t;
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <local_dom, local_port>.
+ * NOTES:
+ *  1. <local_dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may obtain the status of an event
+ *     channel for which <local_dom> is not DOMID_SELF.
+ *  3. <remote_dom, remote_port> is only supplied if status is 'connected'.
+ */
+#define EVTCHNOP_status 3
+typedef struct evtchn_status
+{
+    /* IN parameters */
+    domid_t local_dom;
+    int     local_port;
+    /* OUT parameters */
+    domid_t remote_dom;
+    int     remote_port;
+#define EVTCHNSTAT_closed       0 /* Channel is not in use.              */
+#define EVTCHNSTAT_disconnected 1 /* Channel is not connected to remote. */
+#define EVTCHNSTAT_connected    2 /* Channel is connected to remote.     */
+    int     status;
+} evtchn_status_t;
+
+typedef struct evtchn_op
+{
+    int cmd; /* EVTCHNOP_* */
+    union {
+        evtchn_open_t   open;
+        evtchn_close_t  close;
+        evtchn_send_t   send;
+        evtchn_status_t status;
+    } u;
+} evtchn_op_t;
+
+#endif /* __HYPERVISOR_IFS__EVENT_CHANNEL_H__ */
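For context, a hypothetical caller of this new interface might look as follows. The guest-side hypercall stub name HYPERVISOR_event_channel_op is an assumption here (the stub itself is not part of this changeset); only the structures and constants come from the header above:

    #include <hypervisor-ifs/hypervisor-if.h>
    #include <hypervisor-ifs/event_channel.h>

    /* Assumed guest-side stub that traps into do_event_channel_op(). */
    extern long HYPERVISOR_event_channel_op(evtchn_op_t *op);

    static long open_channel_to(domid_t remote, int *lport, int *rport)
    {
        evtchn_op_t op;
        long        rc;

        op.cmd               = EVTCHNOP_open;
        op.u.open.local_dom  = DOMID_SELF; /* resolved to caller's domain */
        op.u.open.remote_dom = remote;

        if ( (rc = HYPERVISOR_event_channel_op(&op)) != 0 )
            return rc;

        /* OUT fields are only valid when the op succeeds (NOTE 3 above). */
        *lport = op.u.open.local_port;
        *rport = op.u.open.remote_port;
        return 0;
    }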
--- a/xen/include/hypervisor-ifs/hypervisor-if.h	Wed Feb 25 13:24:01 2004 +0000
+++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Wed Feb 25 16:47:26 2004 +0000
@@ -168,21 +168,6 @@
 #define SCHEDOP_exit            3   /* Exit and kill this domain.          */
 #define SCHEDOP_stop            4   /* Stop executing this domain.         */
 
-/*
- * EVTCHNOP_* - Event channel operations.
- */
-#define EVTCHNOP_open           0   /* Open channel to <target domain>.    */
-#define EVTCHNOP_close          1   /* Close <channel id>.                 */
-#define EVTCHNOP_send           2   /* Send event on <channel id>.         */
-#define EVTCHNOP_status         3   /* Get status of <channel id>.         */
-
-/*
- * EVTCHNSTAT_* - Non-error return values from EVTCHNOP_status.
- */
-#define EVTCHNSTAT_closed       0   /* Chennel is not in use.              */
-#define EVTCHNSTAT_disconnected 1   /* Channel is not connected to remote. */
-#define EVTCHNSTAT_connected    2   /* Channel is connected to remote.     */
-
 
 #ifndef __ASSEMBLY__
 
--- a/xen/include/xeno/sched.h	Wed Feb 25 13:24:01 2004 +0000
+++ b/xen/include/xeno/sched.h	Wed Feb 25 16:47:26 2004 +0000
@@ -45,13 +45,16 @@ extern struct mm_struct init_mm;
 
 #define IS_PRIV(_p) (test_bit(PF_PRIVILEGED, &(_p)->flags))
 
+struct task_struct;
+
 typedef struct event_channel_st
 {
-    u16 target_dom;    /* Target domain (i.e. domain at remote end). */
-#define ECF_TARGET_ID  ((1<<10)-1) /* Channel identifier at remote end.    */
-#define ECF_INUSE      (1<<10)     /* Is this channel descriptor in use?   */
-#define ECF_CONNECTED  (1<<11)     /* Is this channel connected to remote? */
-    u16 flags;
+    struct task_struct *remote_dom;
+    u16                 remote_port;
+#define ECS_FREE      0 /* Available for use.                            */
+#define ECS_ZOMBIE    1 /* Connection is closed. Remote is disconnected. */
+#define ECS_CONNECTED 2 /* Connected to remote end.                      */
+    u16                 state;
 } event_channel_t;
 
 struct task_struct
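Finally, the notification machinery that set_event_pending() in event_channel.c drives is a two-level bitmap in shared_info: event_channel_pend_sel marks which 32-bit words of event_channel_pend[] may contain set bits. A hypothetical guest-side dispatch loop over that structure (the helper names find_first_set_bit and do_evtchn_upcall are assumptions, not part of this changeset):

    static void evtchn_dispatch(shared_info_t *s)
    {
        unsigned long sel;
        unsigned int  word, bit, port;

        /* Atomically snapshot-and-clear the top-level selector word. */
        sel = xchg(&s->event_channel_pend_sel, 0);

        while ( sel != 0 )
        {
            word = find_first_set_bit(sel);          /* assumed helper */
            sel &= ~(1UL << word);

            while ( s->event_channel_pend[word] != 0 )
            {
                bit = find_first_set_bit(s->event_channel_pend[word]);
                clear_bit(bit, &s->event_channel_pend[word]);
                port = (word << 5) | bit;            /* inverse of port>>5 */
                do_evtchn_upcall(port);              /* hypothetical handler */
            }
        }
    }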