win-pvdrivers

view liblfds.6/test/src/test_ringbuffer.c @ 766:6300617040e0

Big changes - not ready for production use.
Removed all the custom DMA handling code as it was completely incompatible with the Windows verifier.
Added liblfds (using the lock free stack) from liblfds.org so that grants can be obtained at DIRQL.
Fixed xennet and xenvbd to support the changes.
xenusb and xenscsi almost certainly will not yet work after the changes.
author James Harper <james.harper@bendigoit.com.au>
date Sun Jan 31 21:28:42 2010 +1100 (2010-01-31)
parents
children
line source
1 #include "internal.h"
7 /****************************************************************************/
8 void test_ringbuffer( void )
9 {
10 printf( "\n"
11 "Ringbuffer Tests\n"
12 "================\n" );
14 ringbuffer_test_reading();
15 ringbuffer_test_writing();
16 ringbuffer_test_reading_and_writing();
18 return;
19 }
25 /****************************************************************************/
26 void ringbuffer_test_reading( void )
27 {
28 unsigned int
29 loop,
30 cpu_count;
32 thread_state_t
33 *thread_handles;
35 struct ringbuffer_state
36 *rs;
38 struct freelist_element
39 *fe;
41 struct ringbuffer_test_reading_state
42 *rtrs;
44 struct validation_info
45 vi = { 0, 0 };
47 enum data_structure_validity
48 dvs[3];
50 atom_t
51 total_read = 0;
53 /* TRD : we create a single ringbuffer
54 with 1,000,000 elements
55 we populate the ringbuffer, where the
56 user data is an incrementing counter
58 we create one thread per CPU
59 where each thread busy-works,
60 reading until the ringbuffer is empty
62 each thread keep track of the number of reads it manages
63 and that each user data it reads is greater than the
64 previous user data that was read
65 */
67 internal_display_test_name( "Reading" );
69 cpu_count = abstraction_cpu_count();
71 ringbuffer_new( &rs, 1000000, NULL, NULL );
73 for( loop = 0 ; loop < 1000000 ; loop++ )
74 {
75 ringbuffer_get_write_element( rs, &fe, NULL );
76 freelist_set_user_data_in_element( fe, (void *) (atom_t) loop );
77 ringbuffer_put_write_element( rs, fe );
78 }
80 rtrs = malloc( sizeof(struct ringbuffer_test_reading_state) * cpu_count );
82 for( loop = 0 ; loop < cpu_count ; loop++ )
83 {
84 (rtrs+loop)->rs = rs;
85 (rtrs+loop)->read_count = 0;
86 (rtrs+loop)->error_flag = LOWERED;
87 }
89 thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
91 for( loop = 0 ; loop < cpu_count ; loop++ )
92 abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_reader, rtrs+loop );
94 for( loop = 0 ; loop < cpu_count ; loop++ )
95 abstraction_thread_wait( thread_handles[loop] );
97 free( thread_handles );
99 ringbuffer_query( rs, RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
101 // TRD : check for raised error flags
102 for( loop = 0 ; loop < cpu_count ; loop++ )
103 if( (rtrs+loop)->error_flag == RAISED )
104 dvs[0] = VALIDITY_INVALID_TEST_DATA;
106 // TRD : check thread reads total to 1,000,000
107 for( loop = 0 ; loop < cpu_count ; loop++ )
108 total_read += (rtrs+loop)->read_count;
110 if( total_read < 1000000 )
111 dvs[0] = VALIDITY_INVALID_MISSING_ELEMENTS;
113 if( total_read > 1000000 )
114 dvs[0] = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
116 free( rtrs );
118 ringbuffer_delete( rs, NULL, NULL );
120 internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );
122 return;
123 }
129 /****************************************************************************/
130 thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_reader( void *ringbuffer_test_reading_state )
131 {
132 struct ringbuffer_test_reading_state
133 *rtrs;
135 struct freelist_element
136 *fe;
138 atom_t
139 *prev_user_data,
140 *user_data;
142 assert( ringbuffer_test_reading_state != NULL );
144 rtrs = (struct ringbuffer_test_reading_state *) ringbuffer_test_reading_state;
146 ringbuffer_get_read_element( rtrs->rs, &fe );
147 freelist_get_user_data_from_element( fe, (void **) &prev_user_data );
148 ringbuffer_put_read_element( rtrs->rs, fe );
150 rtrs->read_count++;
152 while( ringbuffer_get_read_element(rtrs->rs, &fe) )
153 {
154 freelist_get_user_data_from_element( fe, (void **) &user_data );
155 ringbuffer_put_read_element( rtrs->rs, fe );
157 if( user_data <= prev_user_data )
158 rtrs->error_flag = RAISED;
160 prev_user_data = user_data;
162 rtrs->read_count++;
163 }
165 return( (thread_return_t) EXIT_SUCCESS );
166 }
172 /****************************************************************************/
173 void ringbuffer_test_writing( void )
174 {
175 unsigned int
176 loop,
177 cpu_count;
179 thread_state_t
180 *thread_handles;
182 struct ringbuffer_state
183 *rs;
185 struct freelist_element
186 *fe;
188 struct ringbuffer_test_writing_state
189 *rtws;
191 struct validation_info
192 vi = { 100000, 100000 };
194 enum data_structure_validity
195 dvs[3];
197 atom_t
198 thread,
199 count,
200 user_data,
201 *per_thread_counters;
203 /* TRD : we create a single ringbuffer
204 with 100000 elements
205 the ringbuffers starts empty
207 we create one thread per CPU
208 where each thread busy-works writing
209 for ten seconds
211 the user data in each written element is a combination
212 of the thread number and the counter
214 after the threads are complete, we validate by
215 checking the user data counters increment on a per thread
216 basis
217 */
219 internal_display_test_name( "Writing (10 seconds)" );
221 cpu_count = abstraction_cpu_count();
223 ringbuffer_new( &rs, 100000, NULL, NULL );
225 rtws = malloc( sizeof(struct ringbuffer_test_writing_state) * cpu_count );
227 for( loop = 0 ; loop < cpu_count ; loop++ )
228 {
229 (rtws+loop)->rs = rs;
230 (rtws+loop)->write_count = (atom_t) loop << (sizeof(atom_t)*8-8);
231 }
233 thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
235 for( loop = 0 ; loop < cpu_count ; loop++ )
236 abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_writer, rtws+loop );
238 for( loop = 0 ; loop < cpu_count ; loop++ )
239 abstraction_thread_wait( thread_handles[loop] );
241 free( thread_handles );
243 // TRD : now check results
244 per_thread_counters = malloc( sizeof(atom_t) * cpu_count );
246 for( loop = 0 ; loop < cpu_count ; loop++ )
247 *(per_thread_counters+loop) = 0;
249 ringbuffer_query( rs, RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
251 while( dvs[0] == VALIDITY_VALID and dvs[1] == VALIDITY_VALID and dvs[2] == VALIDITY_VALID and ringbuffer_get_read_element(rs, &fe) )
252 {
253 freelist_get_user_data_from_element( fe, (void *) &user_data );
255 thread = user_data >> (sizeof(atom_t)*8-8);
256 count = (user_data << 8) >> 8;
258 if( thread >= cpu_count )
259 {
260 dvs[0] = VALIDITY_INVALID_TEST_DATA;
261 ringbuffer_put_read_element( rs, fe );
262 break;
263 }
265 if( per_thread_counters[thread] == 0 )
266 per_thread_counters[thread] = count;
268 if( count < per_thread_counters[thread] )
269 dvs[0] = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
271 if( count >= per_thread_counters[thread] )
272 per_thread_counters[thread] = count+1;
274 ringbuffer_put_read_element( rs, fe );
275 }
277 free( per_thread_counters );
279 free( rtws );
281 ringbuffer_delete( rs, NULL, NULL );
283 internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );
285 return;
286 }
292 /****************************************************************************/
293 thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_writer( void *ringbuffer_test_writing_state )
294 {
295 struct ringbuffer_test_writing_state
296 *rtws;
298 struct freelist_element
299 *fe;
301 time_t
302 start_time;
304 assert( ringbuffer_test_writing_state != NULL );
306 rtws = (struct ringbuffer_test_writing_state *) ringbuffer_test_writing_state;
308 time( &start_time );
310 while( time(NULL) < start_time + 10 )
311 {
312 ringbuffer_get_write_element( rtws->rs, &fe, NULL );
313 freelist_set_user_data_in_element( fe, (void *) (atom_t) (rtws->write_count++) );
314 ringbuffer_put_write_element( rtws->rs, fe );
315 }
317 return( (thread_return_t) EXIT_SUCCESS );
318 }
324 /****************************************************************************/
325 void ringbuffer_test_reading_and_writing( void )
326 {
327 unsigned int
328 loop,
329 subloop,
330 cpu_count;
332 thread_state_t
333 *thread_handles;
335 struct ringbuffer_state
336 *rs;
338 struct ringbuffer_test_reading_and_writing_state
339 *rtrws;
341 struct validation_info
342 vi = { 0, 0 };
344 enum data_structure_validity
345 dvs[3];
347 /* TRD : we create a single ringbuffer
348 with 100000 elements
349 the ringbuffers starts empty
351 we create one thread per CPU
352 where each thread busy-works writing
353 and then immediately reading
354 for ten seconds
356 the user data in each written element is a combination
357 of the thread number and the counter
359 while a thread runs, it keeps track of the
360 counters for the other threads and throws an error
361 if it sees the number stay the same or decrease
362 */
364 internal_display_test_name( "Reading and writing (10 seconds)" );
366 cpu_count = abstraction_cpu_count();
368 ringbuffer_new( &rs, 100000, NULL, NULL );
370 rtrws = malloc( sizeof(struct ringbuffer_test_reading_and_writing_state) * cpu_count );
372 for( loop = 0 ; loop < cpu_count ; loop++ )
373 {
374 (rtrws+loop)->rs = rs;
375 (rtrws+loop)->counter = (atom_t) loop << (sizeof(atom_t)*8-8);
376 (rtrws+loop)->cpu_count = cpu_count;
377 (rtrws+loop)->error_flag = LOWERED;
378 (rtrws+loop)->per_thread_counters = malloc( sizeof(atom_t) * cpu_count );
380 for( subloop = 0 ; subloop < cpu_count ; subloop++ )
381 *((rtrws+loop)->per_thread_counters+subloop) = 0;
382 }
384 thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
386 for( loop = 0 ; loop < cpu_count ; loop++ )
387 abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_reader_writer, rtrws+loop );
389 for( loop = 0 ; loop < cpu_count ; loop++ )
390 abstraction_thread_wait( thread_handles[loop] );
392 free( thread_handles );
394 ringbuffer_query( rs, RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
396 for( loop = 0 ; loop < cpu_count ; loop++ )
397 if( (rtrws+loop)->error_flag == RAISED )
398 dvs[0] = VALIDITY_INVALID_TEST_DATA;
400 for( loop = 0 ; loop < cpu_count ; loop++ )
401 free( (rtrws+loop)->per_thread_counters );
403 free( rtrws );
405 ringbuffer_delete( rs, NULL, NULL );
407 internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );
409 return;
410 }
416 /****************************************************************************/
417 thread_return_t CALLING_CONVENTION ringbuffer_test_thread_reader_writer( void *ringbuffer_test_reading_and_writing_state )
418 {
419 struct ringbuffer_test_reading_and_writing_state
420 *rtrws;
422 struct freelist_element
423 *fe;
425 atom_t
426 user_data,
427 thread,
428 count;
430 time_t
431 start_time;
433 assert( ringbuffer_test_reading_and_writing_state != NULL );
435 rtrws = (struct ringbuffer_test_reading_and_writing_state *) ringbuffer_test_reading_and_writing_state;
437 time( &start_time );
439 while( time(NULL) < start_time + 10 )
440 {
441 ringbuffer_get_write_element( rtrws->rs, &fe, NULL );
442 freelist_set_user_data_in_element( fe, (void *) (atom_t) (rtrws->counter++) );
443 ringbuffer_put_write_element( rtrws->rs, fe );
445 ringbuffer_get_read_element( rtrws->rs, &fe );
446 freelist_get_user_data_from_element( fe, (void *) &user_data );
448 thread = user_data >> (sizeof(atom_t)*8-8);
449 count = (user_data << 8) >> 8;
451 if( thread >= rtrws->cpu_count )
452 rtrws->error_flag = RAISED;
453 else
454 {
455 if( count < rtrws->per_thread_counters[thread] )
456 rtrws->error_flag = RAISED;
458 if( count >= rtrws->per_thread_counters[thread] )
459 rtrws->per_thread_counters[thread] = count+1;
460 }
462 ringbuffer_put_read_element( rtrws->rs, fe );
463 }
465 return( (thread_return_t) EXIT_SUCCESS );
466 }