ia64/xen-unstable
changeset 810:3f44ecdcb631
bitkeeper revision 1.499 (3f867c85oOyUdtcboCzrLgktKtvdgA)
ac_timer.h, ac_timer.c:
Xen ac timers now use a heap to find earliest timeout.
| author   | kaf24@scramble.cl.cam.ac.uk |
|----------|-----------------------------|
| date     | Fri Oct 10 09:31:49 2003 +0000 (2003-10-10) |
| parents  | 79012ba592c9 |
| children | b644fab116b4 |
| files    | xen/common/ac_timer.c xen/include/xeno/ac_timer.h |
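This changeset replaces the per-CPU sorted list of pending timers with a per-CPU binary min-heap keyed on `expires`: the earliest deadline becomes an O(1) peek at `heap[1]`, and insertion/removal become O(log n) instead of an O(n) list walk. The heap is a 1-indexed array of `struct ac_timer *` whose element 0 never holds a timer; its storage is reused to pack the current size and the allocated limit as two 16-bit fields. A minimal standalone sketch of that layout, assuming the same conventions as the patch below (illustrative names, not Xen code):

```c
#include <stdint.h>

struct ac_timer;                        /* opaque for this sketch */

/* Element 0 of the pointer array is not a timer: its first two 16-bit
 * halves hold the live entry count and the allocated capacity, exactly
 * as the GET_/SET_HEAP_* macros in the patch read and write them. */
static inline int heap_size(struct ac_timer **h)
{
    return (int)(((uint16_t *)h)[0]);
}

static inline int heap_limit(struct ac_timer **h)
{
    return (int)(((uint16_t *)h)[1]);
}

/* 1-indexed heap: relatives are found by shifting the index. */
static inline int heap_parent(int pos)      { return pos >> 1; }
static inline int heap_left_child(int pos)  { return pos << 1; }
static inline int heap_right_child(int pos) { return (pos << 1) + 1; }
```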
line diff
```diff
--- a/xen/common/ac_timer.c	Thu Oct 09 09:33:16 2003 +0000
+++ b/xen/common/ac_timer.c	Fri Oct 10 09:31:49 2003 +0000
@@ -39,58 +39,139 @@
  */
 #define TIMER_SLOP (50*1000) /* ns */
 
+#define DEFAULT_HEAP_LIMIT 127
+
 /* A timer list per CPU */
 typedef struct ac_timers_st
 {
-    spinlock_t        lock;
-    struct list_head  timers;
-    s_time_t          max_diff;
+    spinlock_t        lock;
+    struct ac_timer **heap;
 } __cacheline_aligned ac_timers_t;
 static ac_timers_t ac_timers[NR_CPUS];
 
 
-/*****************************************************************************
- * add a timer.
- * return value: CPU mask of remote processors to send an event to
- *****************************************************************************/
+/****************************************************************************
+ * HEAP OPERATIONS.
+ */
+
+#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
+#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))
+
+#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
+#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))
+
+/* Sink down element @pos of @heap. */
+static void down_heap(struct ac_timer **heap, int pos)
+{
+    int sz = GET_HEAP_SIZE(heap), nxt;
+    struct ac_timer *t = heap[pos];
+
+    while ( (nxt = (pos << 1)) <= sz )
+    {
+        if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
+            nxt++;
+        if ( heap[nxt]->expires > t->expires )
+            break;
+        heap[pos] = heap[nxt];
+        heap[pos]->heap_offset = pos;
+        pos = nxt;
+    }
+
+    heap[pos] = t;
+    t->heap_offset = pos;
+}
+
+/* Float element @pos up @heap. */
+static void up_heap(struct ac_timer **heap, int pos)
+{
+    struct ac_timer *t = heap[pos];
+
+    while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
+    {
+        heap[pos] = heap[pos>>1];
+        heap[pos]->heap_offset = pos;
+        pos >>= 1;
+    }
+
+    heap[pos] = t;
+    t->heap_offset = pos;
+}
+
+
+/* Delete @t from @heap. Return TRUE if new top of heap. */
+static int remove_entry(struct ac_timer **heap, struct ac_timer *t)
+{
+    int sz  = GET_HEAP_SIZE(heap);
+    int pos = t->heap_offset;
+
+    t->heap_offset = 0;
+
+    if ( unlikely(pos == sz) )
+    {
+        SET_HEAP_SIZE(heap, sz-1);
+        goto out;
+    }
+
+    heap[pos] = heap[sz];
+    heap[pos]->heap_offset = pos;
+
+    SET_HEAP_SIZE(heap, --sz);
+
+    if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
+        up_heap(heap, pos);
+    else
+        down_heap(heap, pos);
+
+ out:
+    return (pos == 1);
+}
+
+
+/* Add new entry @t to @heap. Return TRUE if new top of heap. */
+static int add_entry(struct ac_timer **heap, struct ac_timer *t)
+{
+    int sz = GET_HEAP_SIZE(heap);
+
+    /* Copy the heap if it is full. */
+    if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
+    {
+        int i, limit = (GET_HEAP_LIMIT(heap)+1) << 1;
+        struct ac_timer **new_heap = kmalloc(
+            limit * sizeof(struct ac_timer *), GFP_KERNEL);
+        if ( new_heap == NULL ) BUG();
+        memcpy(new_heap, heap, (limit>>1) * sizeof(struct ac_timer *));
+        for ( i = 0; i < smp_num_cpus; i++ )
+            if ( ac_timers[i].heap == heap )
+                ac_timers[i].heap = new_heap;
+        kfree(heap);
+        heap = new_heap;
+        SET_HEAP_LIMIT(heap, limit-1);
+    }
+
+    SET_HEAP_SIZE(heap, ++sz);
+    heap[sz] = t;
+    t->heap_offset = sz;
+    up_heap(heap, sz);
+    return (t->heap_offset == 1);
+}
+
+
+/****************************************************************************
+ * TIMER OPERATIONS.
+ */
+
 static inline unsigned long __add_ac_timer(struct ac_timer *timer)
 {
     int cpu = timer->cpu;
+    unsigned long cpu_mask = 0;
 
-    /*
-     * Add timer to the list. If it gets added to the front we schedule
-     * a softirq. This will reprogram the timer, or handle the timer event
-     * immediately, depending on whether alarm is sufficiently ahead in the
-     * future.
-     */
-    if ( list_empty(&ac_timers[cpu].timers) )
-    {
-        list_add(&timer->timer_list, &ac_timers[cpu].timers);
-        goto send_softirq;
-    }
-    else
+    if ( add_entry(ac_timers[cpu].heap, timer) )
     {
-        struct list_head *pos;
-        struct ac_timer  *t;
-
-        list_for_each ( pos, &ac_timers[cpu].timers )
-        {
-            t = list_entry(pos, struct ac_timer, timer_list);
-            if ( t->expires > timer->expires )
-                break;
-        }
-
-        list_add(&(timer->timer_list), pos->prev);
-
-        if ( timer->timer_list.prev == &ac_timers[cpu].timers )
-            goto send_softirq;
+        __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
+        cpu_mask = (cpu != smp_processor_id()) ? 1<<cpu : 0;
     }
 
-    return 0;
-
- send_softirq:
-    __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
-    return (cpu != smp_processor_id()) ? 1<<cpu : 0;
+    return cpu_mask;
 }
 
 void add_ac_timer(struct ac_timer *timer)
@@ -109,54 +190,18 @@ void add_ac_timer(struct ac_timer *timer
 }
 
 
-/*****************************************************************************
- * detach a timer (no locking)
- * return values:
- *  0: success
- * -1: bogus timer
- *****************************************************************************/
-static inline void detach_ac_timer(struct ac_timer *timer)
-{
-    TRC(printk("ACT  [%02d] detach(): \n", cpu));
-    list_del(&timer->timer_list);
-    timer->timer_list.next = NULL;
-}
-
-
-/*****************************************************************************
- * remove a timer
- * return values: CPU mask of remote processors to send an event to
- *****************************************************************************/
 static inline unsigned long __rem_ac_timer(struct ac_timer *timer)
 {
     int cpu = timer->cpu;
-
-    TRC(printk("ACT  [%02d] remove(): timo=%lld \n", cpu, timer->expires));
-    ASSERT(timer->timer_list.next);
+    unsigned long cpu_mask = 0;
 
-    detach_ac_timer(timer);
-
-    if ( timer->timer_list.prev == &ac_timers[cpu].timers )
+    if ( remove_entry(ac_timers[cpu].heap, timer) )
     {
-        /* just removed the head */
-        if ( list_empty(&ac_timers[cpu].timers) )
-        {
-            goto send_softirq;
-        }
-        else
-        {
-            timer = list_entry(ac_timers[cpu].timers.next,
-                               struct ac_timer, timer_list);
-            if ( timer->expires > (NOW() + TIMER_SLOP) )
-                goto send_softirq;
-        }
+        __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
+        cpu_mask = (cpu != smp_processor_id()) ? 1<<cpu : 0;
     }
 
-    return 0;
-
- send_softirq:
-    __cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
-    return (cpu != smp_processor_id()) ? 1<<cpu : 0;
+    return cpu_mask;
 }
 
 void rem_ac_timer(struct ac_timer *timer)
@@ -175,13 +220,6 @@ void rem_ac_timer(struct ac_timer *timer
 }
 
 
-/*****************************************************************************
- * modify a timer, i.e., set a new timeout value
- * return value:
- *  0: sucess
- *  1: timeout error
- * -1: bogus timer
- *****************************************************************************/
 void mod_ac_timer(struct ac_timer *timer, s_time_t new_time)
 {
     int cpu = timer->cpu;
@@ -203,35 +241,25 @@ void mod_ac_timer(struct ac_timer *timer
 }
 
 
-/*****************************************************************************
- * do_ac_timer
- * deal with timeouts and run the handlers
- *****************************************************************************/
 void do_ac_timer(void)
 {
     int              cpu = smp_processor_id();
     unsigned long    flags;
-    struct ac_timer *t;
-    s_time_t         diff, now = NOW();
-    long             max;
+    struct ac_timer *t, **heap;
+    s_time_t         diff, now = NOW();
+    long             max;
 
     spin_lock_irqsave(&ac_timers[cpu].lock, flags);
 
 do_timer_again:
     TRC(printk("ACT  [%02d] do(): now=%lld\n", cpu, NOW()));
-
-    /* Sanity: is the timer list empty? */
-    if ( list_empty(&ac_timers[cpu].timers) )
-        goto out;
+
+    heap = ac_timers[cpu].heap;
 
-    /* Handle all timeouts in the near future. */
-    while ( !list_empty(&ac_timers[cpu].timers) )
+    while ( (GET_HEAP_SIZE(heap) != 0) &&
+            ((t = heap[1])->expires < (NOW() + TIMER_SLOP)) )
     {
-        t = list_entry(ac_timers[cpu].timers.next,struct ac_timer, timer_list);
-        if ( t->expires > (NOW() + TIMER_SLOP) )
-            break;
-
-        ASSERT(t->cpu == cpu);
+        remove_entry(heap, t);
 
         /* Do some stats collection. */
         diff = (now - t->expires);
@@ -241,31 +269,25 @@ void do_ac_timer(void)
         if ( diff > max )
             perfc_seta(ac_timer_max, cpu, diff);
 
-        detach_ac_timer(t);
-
         spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
         if ( t->function != NULL )
            t->function(t->data);
        spin_lock_irqsave(&ac_timers[cpu].lock, flags);
+
+        /* Heap may have grown while the lock was released. */
+        heap = ac_timers[cpu].heap;
     }
-
-    /* If list not empty then reprogram timer to new head of list */
-    if ( !list_empty(&ac_timers[cpu].timers) )
+
+    if ( GET_HEAP_SIZE(heap) != 0 )
     {
-        t = list_entry(ac_timers[cpu].timers.next,struct ac_timer, timer_list);
-        TRC(printk("ACT  [%02d] do(): reprog timo=%lld\n",cpu,t->expires));
-        if ( !reprogram_ac_timer(t->expires) )
-        {
-            TRC(printk("ACT  [%02d] do(): again\n", cpu));
+        if ( !reprogram_ac_timer(heap[1]->expires) )
            goto do_timer_again;
-        }
     }
     else
     {
-        reprogram_ac_timer((s_time_t) 0);
+        reprogram_ac_timer(0);
     }
 
- out:
     spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
     TRC(printk("ACT  [%02d] do(): end\n", cpu));
 }
@@ -273,31 +295,29 @@ void do_ac_timer(void)
 
 static void ac_timer_softirq_action(struct softirq_action *a)
 {
-    int           cpu = smp_processor_id();
-    unsigned long flags;
-    struct ac_timer  *t;
-    struct list_head *tlist;
-    int           process_timer_list = 0;
+    int              cpu = smp_processor_id();
+    unsigned long    flags;
+    struct ac_timer *t;
+    int              process_timer_list = 0;
 
     spin_lock_irqsave(&ac_timers[cpu].lock, flags);
 
-    tlist = &ac_timers[cpu].timers;
-    if ( list_empty(tlist) )
-    {
-        /* No deadline to program the timer with.*/
-        reprogram_ac_timer((s_time_t)0);
-    }
-    else
+    if ( GET_HEAP_SIZE(ac_timers[cpu].heap) != 0 )
     {
         /*
          * Reprogram timer with earliest deadline. If that has already passed
          * then we will process the timer list as soon as we release the lock.
          */
-        t = list_entry(tlist, struct ac_timer, timer_list);
+        t = ac_timers[cpu].heap[1];
         if ( (t->expires < (NOW() + TIMER_SLOP)) ||
              !reprogram_ac_timer(t->expires) )
             process_timer_list = 1;
     }
+    else
+    {
+        /* No deadline to program the timer with.*/
+        reprogram_ac_timer((s_time_t)0);
+    }
 
     spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
 
@@ -306,32 +326,12 @@ static void ac_timer_softirq_action(stru
 }
 
 
-static void dump_tqueue(struct list_head *queue, char *name)
-{
-    struct list_head *list;
-    int loop = 0;
-    struct ac_timer  *t;
-
-    printk ("QUEUE %s %lx   n: %lx, p: %lx\n", name, (unsigned long)queue,
-            (unsigned long) queue->next, (unsigned long) queue->prev);
-
-    list_for_each ( list, queue )
-    {
-        t = list_entry(list, struct ac_timer, timer_list);
-        printk ("  %s %d : %lx ex=0x%08X%08X %lu  n: %lx, p: %lx\n",
-                name, loop++,
-                (unsigned long)list,
-                (u32)(t->expires>>32), (u32)t->expires, t->data,
-                (unsigned long)list->next, (unsigned long)list->prev);
-    }
-}
-
-
 static void dump_timerq(u_char key, void *dev_id, struct pt_regs *regs)
 {
-    u_long   flags;
-    s_time_t now = NOW();
-    int i;
+    struct ac_timer *t;
+    unsigned long    flags;
+    s_time_t         now = NOW();
+    int              i, j;
 
     printk("Dumping ac_timer queues: NOW=0x%08X%08X\n",
            (u32)(now>>32), (u32)now);
@@ -340,7 +340,12 @@ static void dump_timerq(u_char key, void
     {
         printk("CPU[%02d] ", i);
         spin_lock_irqsave(&ac_timers[i].lock, flags);
-        dump_tqueue(&ac_timers[i].timers, "ac_time");
+        for ( j = 1; j <= GET_HEAP_SIZE(ac_timers[i].heap); j++ )
+        {
+            t = ac_timers[i].heap[j];
+            printk ("  %d : %p ex=0x%08X%08X %lu\n",
+                    j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
+        }
         spin_unlock_irqrestore(&ac_timers[i].lock, flags);
         printk("\n");
     }
@@ -355,9 +360,13 @@ void __init ac_timer_init(void)
 
     open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action, NULL);
 
-    for (i = 0; i < NR_CPUS; i++)
+    for ( i = 0; i < smp_num_cpus; i++ )
     {
-        INIT_LIST_HEAD(&ac_timers[i].timers);
+        ac_timers[i].heap = kmalloc(
+            (DEFAULT_HEAP_LIMIT+1) * sizeof(struct ac_timer *), GFP_KERNEL);
+        if ( ac_timers[i].heap == NULL ) BUG();
+        SET_HEAP_SIZE(ac_timers[i].heap, 0);
+        SET_HEAP_LIMIT(ac_timers[i].heap, DEFAULT_HEAP_LIMIT);
        spin_lock_init(&ac_timers[i].lock);
     }
```
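To make the heap discipline above concrete, here is a self-contained sketch (assumed names, plain `long long` expiries instead of `struct ac_timer`; not part of the changeset) of the same 1-indexed sift-up/sift-down operations. Popping the root repeatedly yields deadlines in increasing order, which is what lets `do_ac_timer()` service every timeout within `TIMER_SLOP` of `heap[1]->expires`:

```c
#include <stdio.h>

#define MAX_ENTRIES 64

static long long heap[MAX_ENTRIES + 1]; /* heap[0] unused; 1-indexed */
static int heap_sz;

/* Float the entry at @pos toward the root while it beats its parent. */
static void up_heap(int pos)
{
    long long t = heap[pos];
    while ( (pos > 1) && (t < heap[pos >> 1]) )
    {
        heap[pos] = heap[pos >> 1];   /* pull parent down */
        pos >>= 1;
    }
    heap[pos] = t;
}

/* Sink the entry at @pos toward the leaves, always following the
 * smaller child, until the min-heap property holds again. */
static void down_heap(int pos)
{
    long long t = heap[pos];
    int nxt;
    while ( (nxt = pos << 1) <= heap_sz )
    {
        if ( ((nxt + 1) <= heap_sz) && (heap[nxt + 1] < heap[nxt]) )
            nxt++;                    /* pick the smaller child */
        if ( heap[nxt] > t )
            break;
        heap[pos] = heap[nxt];
        pos = nxt;
    }
    heap[pos] = t;
}

int main(void)
{
    long long expiries[] = { 900, 100, 500, 300, 700 };
    int i;

    for ( i = 0; i < 5; i++ )
    {
        heap[++heap_sz] = expiries[i];
        up_heap(heap_sz);
    }

    /* Pops in deadline order: 100 300 500 700 900. */
    while ( heap_sz > 0 )
    {
        printf("%lld ", heap[1]);
        heap[1] = heap[heap_sz--];    /* move the tail entry to the root */
        if ( heap_sz > 0 )
            down_heap(1);
    }
    printf("\n");
    return 0;
}
```

`remove_entry()` in the patch generalizes this pop: after swapping the tail entry into the vacated slot it sifts up or down depending on how the entry compares with its parent. Note also the growth path in `add_entry()`: when the heap is full it doubles the allocation and re-points the owning CPU's `ac_timers[i].heap`, which is why `do_ac_timer()` re-reads the heap pointer each time it re-takes the lock.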
```diff
--- a/xen/include/xeno/ac_timer.h	Thu Oct 09 09:33:16 2003 +0000
+++ b/xen/include/xeno/ac_timer.h	Fri Oct 10 09:31:49 2003 +0000
@@ -43,11 +43,11 @@
  */
 
 struct ac_timer {
-    struct list_head timer_list;
     s_time_t      expires;   /* system time time out value */
     unsigned long data;
     void        (*function)(unsigned long);
-    int           cpu;
+    unsigned int  cpu;
+    unsigned int  heap_offset;
 };
 
 /* interface for "clients" */
@@ -57,12 +57,12 @@ extern void mod_ac_timer(struct ac_timer
 static __inline__ void init_ac_timer(struct ac_timer *timer, int cpu)
 {
     timer->cpu = cpu;
-    timer->timer_list.next = NULL;
+    timer->heap_offset = 0;
 }
 /* check if ac_timer is active, i.e., on the list */
 static __inline__ int active_ac_timer(struct ac_timer *timer)
 {
-    return (timer->timer_list.next != NULL);
+    return (timer->heap_offset != 0);
 }
 
 /* interface used by programmable timer, implemented hardware dependent */
```
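Finally, a hedged usage sketch of the client interface after this change: `heap_offset == 0` now plays the role that `timer_list.next == NULL` used to, marking a timer as inactive, and offset 1 means the timer currently sits at the top of its CPU's heap. The handler, the timeout constant, and the `xeno/time.h` include for `NOW()` are assumptions for illustration, not taken from the changeset:

```c
#include <xeno/ac_timer.h>
#include <xeno/time.h>               /* assumed home of NOW()/s_time_t */

#define MY_TIMEOUT_NS (10*1000*1000) /* hypothetical 10ms deadline */

static struct ac_timer my_timer;

/* Hypothetical callback: runs from the timer softirq once `expires`
 * has passed; remove_entry() has already reset heap_offset to 0. */
static void my_handler(unsigned long data)
{
}

static void arm_and_cancel_example(int cpu)
{
    init_ac_timer(&my_timer, cpu);    /* heap_offset = 0: inactive */
    my_timer.expires  = NOW() + MY_TIMEOUT_NS;
    my_timer.data     = 0;
    my_timer.function = my_handler;
    add_ac_timer(&my_timer);          /* enters cpu's heap: offset >= 1 */

    if ( active_ac_timer(&my_timer) ) /* true while still queued */
        rem_ac_timer(&my_timer);      /* cancel before it fires */
}
```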