ia64/xen-unstable

view xen/arch/ia64/linux-xen/sort.c @ 9770:ced37bea0647

[IA64] FPH enabling + cleanup

Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors. FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Cleanup (unused functions/variables/fields, debug printf...)
vmx_ia64_switch_to removed (was unused).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Apr 25 22:35:41 2006 -0600 (2006-04-25)
parents b7276814008c
children
line source
1 /*
2 * A fast, small, non-recursive O(nlog n) sort for the Linux kernel
3 *
4 * Jan 23 2005 Matt Mackall <mpm@selenic.com>
5 */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #ifdef XEN
10 #include <linux/types.h>
11 #endif
/*
 * Fast-path swap for 4-byte elements.  The size argument is part of the
 * common swap-callback signature but is ignored here (always 4).
 */
void u32_swap(void *a, void *b, int size)
{
	u32 *pa = a;
	u32 *pb = b;
	u32 tmp;

	tmp = *pa;
	*pa = *pb;
	*pb = tmp;
}
/*
 * Byte-by-byte swap for elements of arbitrary size.
 *
 * Walk the buffers through char * locals: the original incremented the
 * void * parameters directly (*(char *)a++), which is a GCC extension
 * and undefined in ISO C.  Note: the do/while swaps at least one byte,
 * so size must be > 0 — sort() only calls this with size >= 1.
 */
void generic_swap(void *a, void *b, int size)
{
	char *x = a;
	char *y = b;
	char t;

	do {
		t = *x;
		*x++ = *y;
		*y++ = t;
	} while (--size > 0);
}
/*
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp: pointer to comparison function
 * @swap: pointer to swap function or NULL (a default is chosen:
 *        u32_swap for 4-byte elements, generic_swap otherwise)
 *
 * This function does a heapsort on the given array.  You may provide a
 * swap function optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case.  While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort(void *base, size_t num, size_t size,
	  int (*cmp)(const void *, const void *),
	  void (*swap)(void *, void *, int size))
{
	/*
	 * Do all offset arithmetic on a char * alias: arithmetic on
	 * void * (the original's base + c) is a GCC extension and
	 * undefined in ISO C.
	 */
	char *b = base;
	/*
	 * Pre-scale counters (byte offsets) for performance.
	 * NOTE(review): counters are int, so num * size is assumed to
	 * fit in an int — true for existing callers, but worth keeping
	 * in mind for very large arrays.
	 */
	int i = (num/2) * size, n = num * size, c, r;

	if (!swap)
		swap = (size == 4 ? u32_swap : generic_swap);

	/* heapify: sift each non-leaf element down into place */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 < n; r = c) {
			c = r * 2;	/* first child (pre-scaled offset) */
			if (c < n - size && cmp(b + c, b + c + size) < 0)
				c += size;	/* second child is larger */
			if (cmp(b + r, b + c) >= 0)
				break;		/* heap property holds here */
			swap(b + r, b + c, size);
		}
	}

	/* sort: repeatedly move the max to the end, then re-sift the root */
	for (i = n - size; i >= 0; i -= size) {
		swap(b, b + i, size);
		for (r = 0; r * 2 < i; r = c) {
			c = r * 2;
			if (c < i - size && cmp(b + c, b + c + size) < 0)
				c += size;
			if (cmp(b + r, b + c) >= 0)
				break;
			swap(b + r, b + c, size);
		}
	}
}
/* Make sort() available to other modules. */
EXPORT_SYMBOL(sort);
#if 0
/* a simple boot-time regression test */

/* Comparator for ints: ascending order. */
int cmpint(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

/*
 * Fill an array of 1000 ints with a deterministic pseudo-random
 * sequence, sort it, then verify the result is non-decreasing.
 * Always returns 0 so module init succeeds either way; failure is
 * only reported via printk.
 */
static int sort_test(void)
{
	int *a, i, r = 1;

	a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
	BUG_ON(!a);

	printk("testing sort()\n");

	for (i = 0; i < 1000; i++) {
		/* simple multiplicative congruential sequence mod 6599 */
		r = (r * 725861) % 6599;
		a[i] = r;
	}

	sort(a, 1000, sizeof(int), cmpint, NULL);

	for (i = 0; i < 999; i++)
		if (a[i] > a[i+1]) {
			printk("sort() failed!\n");
			break;
		}

	kfree(a);

	return 0;
}

module_init(sort_test);
#endif