1 //==========================================================================
5 // Basic thread stress test
7 //==========================================================================
8 //####ECOSGPLCOPYRIGHTBEGIN####
9 // -------------------------------------------
10 // This file is part of eCos, the Embedded Configurable Operating System.
11 // Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
13 // eCos is free software; you can redistribute it and/or modify it under
14 // the terms of the GNU General Public License as published by the Free
15 // Software Foundation; either version 2 or (at your option) any later version.
17 // eCos is distributed in the hope that it will be useful, but WITHOUT ANY
18 // WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 // You should have received a copy of the GNU General Public License along
23 // with eCos; if not, write to the Free Software Foundation, Inc.,
24 // 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26 // As a special exception, if other files instantiate templates or use macros
27 // or inline functions from this file, or you compile this file and link it
28 // with other works to produce a work based on this file, this file does not
29 // by itself cause the resulting work to be covered by the GNU General Public
30 // License. However the source code for this file must still be made available
31 // in accordance with section (3) of the GNU General Public License.
33 // This exception does not invalidate any other reasons why a work based on
34 // this file might be covered by the GNU General Public License.
36 // Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
37 // at http://sources.redhat.com/ecos/ecos-license/
38 // -------------------------------------------
39 //####ECOSGPLCOPYRIGHTEND####
40 //==========================================================================
41 //#####DESCRIPTIONBEGIN####
44 // Contributors: rosalia, jskov
46 // Description: Very simple thread stress test, with some memory
47 // allocation and alarm handling.
50 // If client_makes_request is big, it means that there are made many more
51 // client requests than can be serviced. Consequently, clients are wasting
52 // CPU time and should be sleeping more.
54 // The list of handler invocations show how many threads are running
55 // at the same time. The more powerful the CPU, the more the numbers
57 //####DESCRIPTIONEND####
59 #include <pkgconf/system.h>
60 #include <cyg/infra/testcase.h>
62 #include <cyg/hal/hal_arch.h>
64 #if defined(CYGPKG_KERNEL) && defined(CYGPKG_IO) && defined(CYGPKG_ISOINFRA)
66 #include <pkgconf/kernel.h>
67 #include <pkgconf/isoinfra.h>
68 #include CYGHWR_MEMORY_LAYOUT_H
70 #if defined(CYGFUN_KERNEL_API_C)
72 #include <cyg/kernel/kapi.h>
74 #ifdef CYGINT_ISO_STDIO_FORMATTED_IO
79 #if defined(CYGPKG_LIBM)
84 #include <cyg/kernel/test/stackmon.h>
86 #if defined(CYGFUN_KERNEL_THREADS_TIMER)
89 /* if TIME_LIMIT is defined, it represents the number of seconds this
90 test should last; if it is undefined the test will go forever */
91 #define DEATH_TIME_LIMIT 20
92 /* #undef DEATH_TIME_LIMIT */
94 // STACK_SIZE is typical +2kB for printf family calls which use big
95 // auto variables. Add more for handler which calls perform_stressful_tasks()
96 #define STACK_SIZE (2*1024 + CYGNUM_HAL_STACK_SIZE_TYPICAL)
/* Handlers need extra headroom: 30 stack frames on top of STACK_SIZE. */
97 #define STACK_SIZE_HANDLER (STACK_SIZE + 30*CYGNUM_HAL_STACK_FRAME_SIZE)
101 // If we have instrumentation enabled, make the execution time in the
102 // simulator even shorter than we were going to anyway.
103 #ifdef CYGPKG_KERNEL_INSTRUMENT
104 #define SIM_DELAY_DIVISOR 100
/* NOTE(review): an #else (original line 105) between the two definitions
   is not visible in this excerpt. */
106 #define SIM_DELAY_DIVISOR 10
109 //-----------------------------------------------------------------------
110 // Some targets need to define a smaller number of handlers due to
111 // memory restrictions.
/* Small-RAM targets (< 512kB): fewer handlers/listeners, smaller stacks. */
112 #if defined(CYGMEM_REGION_ram_SIZE) && (CYGMEM_REGION_ram_SIZE < 0x80000)
113 #define MAX_HANDLERS 4
114 #define N_LISTENERS 1
/* NOTE(review): the matching #undef STACK_SIZE (original line ~117) is not
   visible in this excerpt; STACK_SIZE_HANDLER is redefined from the smaller
   STACK_SIZE below. */
118 #undef STACK_SIZE_HANDLER
119 #define STACK_SIZE (1024 + CYGNUM_HAL_STACK_SIZE_TYPICAL)
120 #define STACK_SIZE_HANDLER (STACK_SIZE + 10*CYGNUM_HAL_STACK_FRAME_SIZE)
123 //-----------------------------------------------------------------------
124 // If no target specific definitions, use defaults
126 #define MAX_HANDLERS 19
127 #define N_LISTENERS 4
131 /* Allocate priorities in this order. This ensures that handlers
132 (which are the ones using the CPU) get enough CPU time to actually
133 complete their tasks.
135 The empty space ensures that if libc main() thread should happen to
136 be in the priority range of the handlers, no handlers are
137 accidentally reduced so much in priority to get below
138 listeners/clients. */
/* Priority layout (low number = high priority):
   main < handlers < (2-slot gap) < listeners < clients. */
140 #define P_MAIN_PROGRAM 1
141 #define P_MAIN_PROGRAM_E (P_MAIN_PROGRAM+N_MAIN)
143 #define P_BASE_HANDLER (P_MAIN_PROGRAM_E)
144 #define P_BASE_HANDLER_E (P_BASE_HANDLER+MAX_HANDLERS)
146 #define P_BASE_EMPTY (P_BASE_HANDLER_E)
147 #define P_BASE_EMPTY_E (P_BASE_EMPTY+2)
149 #define P_BASE_LISTENER (P_BASE_EMPTY_E)
150 #define P_BASE_LISTENER_E (P_BASE_LISTENER+N_LISTENERS)
152 #define P_BASE_CLIENT (P_BASE_LISTENER_E)
153 #define P_BASE_CLIENT_E (P_BASE_CLIENT+N_CLIENTS)
/* Total number of distinct priorities/threads the test may use. */
155 #define P_MAX (P_BASE_CLIENT_E)
157 /* Ensure there's room for what we request */
158 #if (CYGNUM_KERNEL_SCHED_PRIORITIES >= P_MAX)
160 /* if we use the bitmap scheduler we must make sure we don't use the
161 same priority more than once, so we must store those already in use */
/* Usage count per priority slot; asserted to be 0/1 at create/delete time. */
162 static volatile char priority_in_use[P_MAX];
164 /* We may not get the priority we ask for (scheduler may decide to ignore
165 schedule hint). So keep a table of priorities actually assigned to
166 the threads. This information may come in handy for debugging - it's
167 not actively used by the code. */
168 static volatile int priority_translation[P_MAX];
170 /* now declare (and allocate space for) some kernel objects, like the
171 threads we will use */
172 cyg_thread main_thread_s;
173 cyg_thread handler_thread_s[MAX_HANDLERS];
174 cyg_thread listener_thread_s[N_LISTENERS];
175 cyg_thread client_thread_s[N_CLIENTS];
177 /* space for stacks for all threads */
178 char main_stack[STACK_SIZE];
179 char handler_stack[MAX_HANDLERS][STACK_SIZE_HANDLER];
180 char listener_stack[N_LISTENERS][STACK_SIZE];
181 char client_stack[N_CLIENTS][STACK_SIZE];
183 /* now the handles for the threads */
185 cyg_handle_t handlerH[MAX_HANDLERS];
186 cyg_handle_t listenerH[N_LISTENERS];
187 cyg_handle_t clientH[N_CLIENTS];
189 /* space for thread names */
/* One 20-byte name buffer per priority slot, filled via sprintf. */
190 char thread_name[P_MAX][20];
192 /* and now variables for the procedure which is the thread */
193 cyg_thread_entry_t main_program, client_program, listener_program,
196 /* a few mutexes used in the code */
197 cyg_mutex_t client_request_lock, handler_slot_lock, statistics_print_lock,
200 /* global variables with which the handler IDs and thread priorities
201 to free are communicated from handlers to main_program. Access to
202 these are protected by free_handler_lock. An id of -1 means
203 that the variables are empty. */
204 volatile int free_handler_pri = 0;
205 volatile int free_handler_id = -1;
207 /* a global variable with which the client and server coordinate */
/* Count of outstanding client requests; guarded by client_request_lock. */
208 volatile int client_makes_request = 0;
210 /* if this is true, clients will not make requests */
211 volatile int clients_paused = 0;
214 /* indicates that it's time to print out a report */
/* Set by report_alarm_func (alarm context), consumed by main_program. */
215 volatile int time_to_report = 0;
216 /* print status after a delay of this many secs. */
217 int time_report_delay;
219 /*** now application-specific variables ***/
220 /* an array that stores whether the handler threads are in use */
221 volatile int handler_thread_in_use[MAX_HANDLERS];
222 /* total count of active handlers */
223 volatile int handler_thread_in_use_count;
226 /***** statistics-gathering variables *****/
/* Counters accumulated throughout the run; printed (and halved when close
   to overflow) by print_statistics(). */
227 struct s_statistics {
228 /* store the number of times each handler has been invoked */
229 unsigned long handler_invocation_histogram[MAX_HANDLERS];
231 /* store how many times malloc has been attempted and how many times
233 unsigned long malloc_tries, malloc_failures;
235 /* how many threads have been created */
236 unsigned long thread_creations, thread_exits;
239 struct s_statistics statistics;
241 /* some function prototypes; those with the sc_ prefix are
242 "statistics-collecting" versions of the cyg_ primitives */
243 cyg_addrword_t sc_thread_create(
244 cyg_addrword_t sched_info, /* scheduling info (eg pri) */
245 cyg_thread_entry_t *entry, /* entry point function */
246 cyg_addrword_t entry_data, /* entry data */
247 char *name, /* optional thread name */
248 void *stack_base, /* stack base, NULL = alloc */
249 cyg_ucount32 stack_size, /* stack size, 0 = default */
250 cyg_handle_t *handle, /* returned thread handle */
251 cyg_thread *thread /* put thread here */
/* Lifecycle helpers for the dynamically spawned handler threads. */
254 void start_handler(void);
255 void stop_handler(int handler_id, int handler_pri);
256 void perform_stressful_tasks(void);
257 void permute_array(char a[], int size, int seed);
258 void setup_death_alarm(cyg_addrword_t data, cyg_handle_t *deathHp,
259 cyg_alarm *death_alarm_p, int *killed_p);
260 void print_statistics(int print_full);
262 /* we need to declare the alarm handling function (which is defined
263 below), so that we can pass it to cyg_alarm_initialize() */
264 cyg_alarm_t report_alarm_func, death_alarm_func;
266 /* handle and alarm for the report alarm */
267 cyg_handle_t report_alarmH, counterH, system_clockH;
268 cyg_alarm report_alarm;
270 /* main launches all the threads of the test */
/* NOTE(review): the enclosing function's signature (original lines 271-276)
   is not visible in this excerpt. The body below creates all threads,
   resumes them, and arms the periodic report alarm. */
277 CYG_TEST_INFO("Stress threads test compiled on " __DATE__);
279 cyg_mutex_init(&client_request_lock);
280 cyg_mutex_init(&statistics_print_lock);
281 cyg_mutex_init(&free_handler_lock);
283 /* initialize statistics */
284 memset(&statistics, 0, sizeof(statistics));
286 /* clear priority table */
287 for (i = 0; i < sizeof(priority_in_use); i++)
288 priority_in_use[i] = 0;
290 /* initialize main thread */
292 priority_translation[P_MAIN_PROGRAM] =
293 sc_thread_create(P_MAIN_PROGRAM, main_program, (cyg_addrword_t) 0,
294 "main_program", (void *) main_stack, STACK_SIZE,
295 &mainH, &main_thread_s);
296 priority_in_use[P_MAIN_PROGRAM]++;
299 /* initialize all handler threads to not be in use */
300 for (i = 0; i < MAX_HANDLERS; ++i) {
301 handler_thread_in_use[i] = 0;
303 handler_thread_in_use_count = 0;
/* Create listener threads, one priority slot each starting at
   P_BASE_LISTENER; names are built in the shared thread_name table. */
304 for (i = 0; i < N_LISTENERS; ++i) {
305 int prio = P_BASE_LISTENER + i;
306 char* name = &thread_name[prio][0];
307 sprintf(name, "listener-%02d", i);
308 priority_translation[prio] =
309 sc_thread_create(prio, listener_program, (cyg_addrword_t) i,
310 name, (void *) listener_stack[i], STACK_SIZE,
311 &listenerH[i], &listener_thread_s[i]);
312 CYG_ASSERT(0 == priority_in_use[prio], "Priority already in use!");
313 priority_in_use[prio]++;
/* Create client threads in the P_BASE_CLIENT priority band. */
315 for (i = 0; i < N_CLIENTS; ++i) {
316 int prio = P_BASE_CLIENT + i;
317 char* name = &thread_name[prio][0];
318 sprintf(name, "client-%02d", i);
319 priority_translation[prio] =
320 sc_thread_create(prio, client_program, (cyg_addrword_t) i,
321 name, (void *) client_stack[i], STACK_SIZE,
322 &(clientH[i]), &client_thread_s[i]);
323 CYG_ASSERT(0 == priority_in_use[prio], "Priority already in use!");
324 priority_in_use[prio]++;
/* Threads are created suspended; start them all running now. */
327 cyg_thread_resume(mainH);
328 for (i = 0; i < N_CLIENTS; ++i) {
329 cyg_thread_resume(clientH[i]);
331 for (i = 0; i < N_LISTENERS; ++i) {
332 cyg_thread_resume(listenerH[i]);
335 /* set up the alarm which gives periodic wakeups to say "time to
337 system_clockH = cyg_real_time_clock();
338 cyg_clock_to_counter(system_clockH, &counterH);
340 cyg_alarm_create(counterH, report_alarm_func,
341 (cyg_addrword_t) 4000,
342 &report_alarmH, &report_alarm);
/* Report far more often in the simulator so test runs stay short. */
343 if (cyg_test_is_simulator) {
344 time_report_delay = 2;
346 time_report_delay = 60;
/* First report after 200 ticks, then every time_report_delay seconds
   (delay is in seconds; the clock here ticks 100 per second). */
349 cyg_alarm_initialize(report_alarmH, cyg_current_time()+200,
350 time_report_delay*100);
355 /* main_program() -- frees resources and prints status. */
/* Reaper/monitor thread: repeatedly collects finished handler IDs left in
   the free_handler_* mailbox, deletes those threads, and on report/death
   events pauses clients, waits for handlers to drain, then prints stats.
   NOTE(review): this excerpt is elided — the surrounding loop and several
   statements (original line gaps) are not visible here. */
356 void main_program(cyg_addrword_t data)
358 #ifdef DEATH_TIME_LIMIT
360 cyg_alarm death_alarm;
/* Arm the one-shot alarm that ends the test after DEATH_TIME_LIMIT secs. */
363 setup_death_alarm(0, &deathH, &death_alarm, &is_dead);
364 #endif /* DEATH_TIME_LIMIT */
370 cyg_mutex_lock(&free_handler_lock); {
371 // If any handler has left its ID, copy the ID and
372 // priority values to local variables, and free up the
373 // global communication variables again.
374 if (-1 != free_handler_id) {
375 handler_id = free_handler_id;
376 handler_pri = free_handler_pri;
377 free_handler_id = -1;
379 } cyg_mutex_unlock(&free_handler_lock);
/* Delete the finished handler outside the mailbox lock. */
381 if (-1 != handler_id) {
382 stop_handler(handler_id, handler_pri);
385 // If it's time to report status or quit, set pause flag and
386 // keep looping until all handlers have stopped.
387 if (time_to_report) {
389 cyg_mutex_lock(&client_request_lock); {
391 } cyg_mutex_unlock(&client_request_lock);
393 // When all handlers have stopped, we can print statistics
394 // knowing that all (handler allocated) resources should have
395 // been freed. That is, we should be able to determine leaks.
396 if (0 == handler_thread_in_use_count) {
399 // We've done the printing now. Resume the system.
401 cyg_mutex_lock(&client_request_lock); {
403 } cyg_mutex_unlock(&client_request_lock);
407 #ifdef DEATH_TIME_LIMIT
408 // Stop test if time.
411 cyg_mutex_lock(&client_request_lock); {
413 } cyg_mutex_unlock(&client_request_lock);
415 // When all handlers have stopped, we can print statistics
416 // knowing that all (handler allocated) resources should have
417 // been freed. That is, we should be able to determine leaks.
418 if (0 == handler_thread_in_use_count) {
/* All resources drained: declare success and terminate the test. */
420 CYG_TEST_PASS_FINISH("Kernel thread stress test OK");
423 #endif /* DEATH_TIME_LIMIT */
429 /* client_program() -- an obnoxious client which makes a lot of requests */
/* Client thread body: loops forever (loop header elided from this excerpt),
   bumping the shared request counter under client_request_lock unless
   clients are paused, then sleeping a randomized 10-29 ticks. */
430 void client_program(cyg_addrword_t data)
434 system_clockH = cyg_real_time_clock();
435 cyg_clock_to_counter(system_clockH, &counterH);
/* Random component keeps clients from running in lock-step. */
438 delay = (rand() % 20);
440 /* now send a request to the server */
441 cyg_mutex_lock(&client_request_lock); {
442 if (0 == clients_paused)
443 client_makes_request++;
444 } cyg_mutex_unlock(&client_request_lock);
446 cyg_thread_delay(10+delay);
450 /* listener_program() -- listens for a request and spawns a handler to
451 take care of the request */
/* Listener thread body: polls the shared request counter under the lock,
   consumes one request per iteration (handler spawn via start_handler()
   is in elided lines), then sleeps 2-11 ticks before polling again. */
452 void listener_program(cyg_addrword_t data)
455 int make_request = 0;
456 cyg_mutex_lock(&client_request_lock); {
457 if (client_makes_request > 0) {
458 --client_makes_request;
461 } cyg_mutex_unlock(&client_request_lock);
466 cyg_thread_delay(2 + (rand() % 10));
470 /* handler_program() -- is spawned to handle each incoming request */
/* Handler thread body: does the stress work, sleeps a log-scaled random
   delay, then hands its slot ID and priority back to main_program through
   the free_handler_* mailbox (retrying until the mailbox is empty). */
471 void handler_program(cyg_addrword_t data)
473 /* here is where we perform specific stressful tasks */
474 perform_stressful_tasks();
/* Randomized delay; log()/fabs() is why this test requires libm. */
476 cyg_thread_delay(4 + (int) (0.5*log(1.0 + fabs((rand() % 1000000)))));
479 // Loop until the handler id and priority can be communicated to
483 cyg_mutex_lock(&free_handler_lock); {
484 if (-1 == free_handler_id) {
485 free_handler_id = data;
486 free_handler_pri = P_BASE_HANDLER+(int) data;
489 } cyg_mutex_unlock(&free_handler_lock);
499 /* start a new handler */
/* Finds a free handler slot (under handler_slot_lock), claims it, then
   creates and resumes a handler thread at priority P_BASE_HANDLER+slot.
   NOTE(review): the no-free-slot path is in lines elided from this
   excerpt. */
500 void start_handler(void)
504 int handler_slot = 0;
508 cyg_mutex_lock(&handler_slot_lock); {
509 for (handler_slot = 0; handler_slot < MAX_HANDLERS;++handler_slot){
510 if (!handler_thread_in_use[handler_slot]) {
512 handler_thread_in_use[handler_slot]++;
513 handler_thread_in_use_count++;
517 } cyg_mutex_unlock(&handler_slot_lock);
522 CYG_ASSERT(1 == handler_thread_in_use[handler_slot],
523 "Handler usage count wrong!");
/* Slot index maps 1:1 to a priority in the handler band. */
525 prio = P_BASE_HANDLER+handler_slot;
526 CYG_ASSERT(0 == priority_in_use[prio], "Priority already in use!");
527 priority_in_use[prio]++;
529 name = &thread_name[prio][0];
530 sprintf(name, "handler-%02d/%02d", handler_slot, prio);
532 priority_translation[prio] =
533 sc_thread_create(prio, handler_program,
534 (cyg_addrword_t) handler_slot,
535 name, (void *) handler_stack[handler_slot],
536 STACK_SIZE_HANDLER, &handlerH[handler_slot],
537 &handler_thread_s[handler_slot]);
538 cyg_thread_resume(handlerH[handler_slot]);
539 ++statistics.handler_invocation_histogram[handler_slot];
542 /* free a locked handler thread */
/* Deletes the handler thread for the given slot and releases its slot and
   priority bookkeeping under handler_slot_lock. Called from main_program
   after the handler posts its ID to the free_handler_* mailbox. */
543 void stop_handler(int handler_id, int handler_pri)
545 // Finally delete the handler thread. This must be done in a
546 // loop, waiting for the call to return true. If it returns
547 // false, go to sleep for a bit, so the killed thread gets a
548 // chance to run and complete its business.
549 while (!cyg_thread_delete(handlerH[handler_id])) {
552 ++statistics.thread_exits;
554 // Free the handler resources.
555 cyg_mutex_lock(&handler_slot_lock); {
556 handler_thread_in_use[handler_id]--;
557 handler_thread_in_use_count--;
558 priority_in_use[handler_pri]--;
/* After the decrements both counters must read "free" again. */
559 CYG_ASSERT(0 == priority_in_use[handler_pri],
560 "Priority not in use!");
561 CYG_ASSERT(0 == handler_thread_in_use[handler_id],
562 "Handler not in use!");
563 CYG_ASSERT(0 <= handler_thread_in_use_count,
564 "Stopped more handlers than was started!");
565 } cyg_mutex_unlock(&handler_slot_lock);
570 /* do things which will stress the system */
/* Stress driver run by each handler thread: a malloc/fill/verify/free
   cycle over up to MAX_MALLOCED_SPACES chunks, followed by create/alloc/
   delete of ten fixed-size memory pools. Several lines (loop closers,
   yields, the failure branch body) are elided from this excerpt. */
571 void perform_stressful_tasks()
573 #define MAX_MALLOCED_SPACES 100 /* do this many mallocs at most */
574 #define MALLOCED_BASE_SIZE 1 /* basic size in bytes */
575 char *spaces[MAX_MALLOCED_SPACES];
576 int sizes[MAX_MALLOCED_SPACES];
577 unsigned int i, j, size;
579 cyg_uint8 pool_space[10][100];
580 cyg_handle_t mempool_handles[10];
581 cyg_mempool_fix mempool_objects[10];
583 /* here I use malloc, which uses the kernel's variable memory pools.
584 note that malloc/free is a bit simple-minded here: it does not
585 try to really fragment things, and it does not try to make the
586 allocation/deallocation concurrent with other thread execution
587 (although I'm about to throw in a yield()) */
588 for (i = 0; i < MAX_MALLOCED_SPACES; ++i) {
589 ++statistics.malloc_tries;
/* Odd sizes 1, 3, 5, ... exercise unaligned/irregular allocations. */
590 size = (i*2+1)*MALLOCED_BASE_SIZE;
591 spaces[i] = (char *) malloc(size);
594 if (spaces[i] != NULL) {
595 // Fill with a known value (differs between chunk).
596 for (j = 0; j < size; ++j) {
597 spaces[i][j] = 0x50 | ((j+i) & 0x0f);
601 if (i % (MAX_MALLOCED_SPACES/10) == 0) {
604 if (i % (MAX_MALLOCED_SPACES/15) == 0) {
605 cyg_thread_delay(i % 5);
611 /* now free it all up */
612 for (i = 0; i < MAX_MALLOCED_SPACES; ++i) {
613 if (spaces[i] != NULL) {
/* NOTE(review): `size` is presumably reloaded (e.g. from sizes[i]) on an
   elided line before this loop — confirm against the full source. */
615 for (j = 0; j < size; ++j) {
616 // Validate chunk data.
617 if ((0x50 | ((j+i) & 0x0f)) != spaces[i][j]) {
618 printf("Bad byte in chunk\n");
/* Poison freed-candidate memory to catch use-after-free readers. */
620 spaces[i][j] = 0xAA; /* write a bit pattern */
624 ++statistics.malloc_failures;
628 /* now allocate and then free some fixed-size memory pools; for
629 now this is simple-minded because it does not have many threads
630 sharing the memory pools and racing for memory. */
631 for (i = 0; i < 10; ++i) {
632 cyg_mempool_fix_create(pool_space[i], 100, (i+1)*3,
633 &mempool_handles[i], &mempool_objects[i]);
636 for (i = 0; i < 10; ++i) {
637 spaces[i] = cyg_mempool_fix_try_alloc(mempool_handles[i]);
640 for (i = 0; i < 10; ++i) {
642 cyg_mempool_fix_delete(mempool_handles[i]);
647 /* report_alarm_func() is invoked as an alarm handler, so it should be
648 quick and simple. in this case it sets a global flag which is
649 checked by main_program. */
/* Body (setting time_to_report) is in lines elided from this excerpt. */
650 void report_alarm_func(cyg_handle_t alarmH, cyg_addrword_t data)
655 #ifdef DEATH_TIME_LIMIT
656 /* this sets up death alarms. it gets the handle and alarm from the
657 caller, since they must persist for the life of the alarm */
/* Arms a one-shot alarm that fires after DEATH_TIME_LIMIT seconds (divided
   by SIM_DELAY_DIVISOR in the simulator); the alarm handler sets
   *killed_p via death_alarm_func. */
658 void setup_death_alarm(cyg_addrword_t data, cyg_handle_t *deathHp,
659 cyg_alarm *death_alarm_p, int *killed_p)
661 cyg_handle_t system_clockH, counterH;
662 cyg_resolution_t rtc_res;
664 system_clockH = cyg_real_time_clock();
665 cyg_clock_to_counter(system_clockH, &counterH);
667 cyg_alarm_create(counterH, death_alarm_func,
668 (cyg_addrword_t) killed_p,
669 deathHp, death_alarm_p);
670 rtc_res = cyg_clock_get_resolution(system_clockH);
/* Convert seconds to clock ticks using the RTC resolution
   (dividend/divisor are in nanoseconds-per-tick terms). */
672 cyg_tick_count_t tick_delay;
673 tick_delay = (long long)
674 ((1000000000.0*rtc_res.divisor)
675 *((double)DEATH_TIME_LIMIT)/((double)rtc_res.dividend));
676 if ( cyg_test_is_simulator )
677 tick_delay /= SIM_DELAY_DIVISOR;
678 #ifdef CYGPKG_HAL_SYNTH
679 // 20 seconds is a long time compared to the run time of other tests.
680 // Reduce to 10 seconds, allowing more tests to get run.
/* Interval 0 => one-shot alarm: fires once at now + tick_delay. */
684 cyg_alarm_initialize(*deathHp, cyg_current_time() + tick_delay, 0);
689 /* death_alarm_func() is the alarm handler that kills the current
690 thread after a specified timeout. It does so by setting a flag the
691 thread is constantly checking. */
/* data is the int* passed to cyg_alarm_create in setup_death_alarm;
   the flag-setting store is in a line elided from this excerpt. */
692 void death_alarm_func(cyg_handle_t alarmH, cyg_addrword_t data)
695 killed_p = (int *) data;
699 /* now I write the sc_ versions of the cyg_functions */
/* Statistics-collecting wrapper around cyg_thread_create(): counts the
   creation, creates the (suspended) thread, and returns the priority the
   scheduler actually assigned (may differ from sched_info hint). */
700 cyg_addrword_t sc_thread_create(
701 cyg_addrword_t sched_info, /* scheduling info (eg pri) */
702 cyg_thread_entry_t *entry, /* entry point function */
703 cyg_addrword_t entry_data, /* entry data */
704 char *name, /* optional thread name */
705 void *stack_base, /* stack base, NULL = alloc */
706 cyg_ucount32 stack_size, /* stack size, 0 = default */
707 cyg_handle_t *handle, /* returned thread handle */
708 cyg_thread *thread /* put thread here */
711 ++statistics.thread_creations;
713 cyg_thread_create(sched_info, entry, entry_data, name,
714 stack_base, stack_size, handle, thread);
716 return cyg_thread_get_priority(*handle);
720 #define MINS_HOUR (60)
721 #define MINS_DAY (60*24)
/* Periodic status dump: rate-limits itself (per-minute, then hourly, then
   daily), prints the test's counters under statistics_print_lock, halves
   counters near overflow, and dumps heap/stack usage. Several lines
   (returns, loop closers, print_full handling) are elided here. */
723 void print_statistics(int print_full)
726 static int stat_dumps = 0;
727 static int print_count = 0;
728 static int shift_count = 0;
733 // Find number of minutes.
734 minutes = time_report_delay*stat_dumps / 60;
737 // Return if time/minutes not integer.
738 if ((time_report_delay*stat_dumps % 60) != 0)
741 // After the first day, only dump stat once per day. Do print
742 // a . on the hour though.
743 if ((minutes > MINS_DAY) && ((minutes % MINS_DAY) != 0)) {
744 if ((minutes % MINS_HOUR) == 0) {
751 // After the first hour of the first day, only dump stat once
752 // per hour. Do print . each minute though.
753 if ((minutes < MINS_DAY) && (minutes > MINS_HOUR)
754 && ((minutes % MINS_HOUR) != 0)) {
/* shift_count records how many times counters were halved, so the reader
   can rescale the printed numbers. */
761 printf("\nState dump %d (%d hours, %d minutes) [numbers >>%d]\n",
762 ++print_count, minutes / MINS_HOUR, minutes % MINS_HOUR,
765 cyg_mutex_lock(&statistics_print_lock); {
766 //--------------------------------
767 // Information private to this test:
768 printf(" Handler-invocations: ");
769 for (i = 0; i < MAX_HANDLERS; ++i) {
770 printf("%4lu ", statistics.handler_invocation_histogram[i]);
773 printf(" malloc()-tries/failures: -- %7lu %7lu\n",
774 statistics.malloc_tries, statistics.malloc_failures);
775 printf(" client_makes_request: %d\n", client_makes_request);
777 // Check for big numbers and reduce if getting close to overflow
778 if (statistics.malloc_tries > 0x40000000) {
780 for (i = 0; i < MAX_HANDLERS; ++i) {
781 statistics.handler_invocation_histogram[i] >>= 1;
783 statistics.malloc_tries >>= 1;
784 statistics.malloc_failures >>= 1;
786 } cyg_mutex_unlock(&statistics_print_lock);
788 #if CYGINT_ISO_MALLINFO
789 //--------------------------------
790 // System information
792 struct mallinfo mem_info;
794 mem_info = mallinfo();
796 printf(" Memory system: Total=0x%08x Free=0x%08x Max=0x%08x\n",
797 mem_info.arena, mem_info.fordblks, mem_info.maxfree);
/* Stack high-water marks for every thread stack in the test. */
802 printf(" Stack usage:\n");
803 cyg_test_dump_interrupt_stack_stats( " Interrupt" );
804 cyg_test_dump_idlethread_stack_stats( " Idle" );
806 cyg_test_dump_stack_stats(" Main", main_stack,
807 main_stack + sizeof(main_stack));
808 for (i = 0; i < MAX_HANDLERS; i++) {
809 cyg_test_dump_stack_stats(" Handler", handler_stack[i],
810 handler_stack[i] + sizeof(handler_stack[i]));
812 for (i = 0; i < N_LISTENERS; i++) {
813 cyg_test_dump_stack_stats(" Listener", listener_stack[i],
814 listener_stack[i] + sizeof(listener_stack[i]));
816 for (i = 0; i < N_CLIENTS; i++) {
817 cyg_test_dump_stack_stats(" Client", client_stack[i],
818 client_stack[i] + sizeof(client_stack[i]));
/* Fallback ladder: when a required package/feature is missing, each #else
   arm defines N_A_MSG and the test reports "not applicable".
   Fix: the final message read "this tests needs ..." — corrected to
   "this test needs ..." for grammar and consistency with every sibling
   message in the ladder. */
822 #else /* (CYGNUM_KERNEL_SCHED_PRIORITIES >= */
823 /* (N_MAIN+N_CLIENTS+N_LISTENERS+MAX_HANDLERS)) */
824 #define N_A_MSG "not enough priorities available"
825 #endif /* (CYGNUM_KERNEL_SCHED_PRIORITIES >= */
826 /* (N_MAIN+N_CLIENTS+N_LISTENERS+MAX_HANDLERS)) */
828 #else /* CYGINT_ISO_MALLOC */
829 # define N_A_MSG "this test needs malloc"
830 #endif /* CYGINT_ISO_MALLOC */
832 #else /* CYGFUN_KERNEL_THREADS_TIMER */
833 # define N_A_MSG "this test needs kernel threads timer"
834 #endif /* CYGFUN_KERNEL_THREADS_TIMER */
836 #else /* CYGPKG_LIBM */
837 # define N_A_MSG "this test needs libm"
838 #endif /* CYGPKG_LIBM */
840 #else /* CYGINT_ISO_STDIO_FORMATTED_IO */
841 # define N_A_MSG "this test needs stdio formatted I/O"
842 #endif /* CYGINT_ISO_STDIO_FORMATTED_IO */
844 #else // def CYGFUN_KERNEL_API_C
845 # define N_A_MSG "this test needs Kernel C API"
848 #else // def CYGPKG_KERNEL && CYGPKG_IO && CYGPKG_ISOINFRA
849 # define N_A_MSG "this test needs Kernel, isoinfra and IO"
/* Report the not-applicable reason to the test harness. */
857 CYG_TEST_NA( N_A_MSG);