1 //==========================================================================
5 // Basic timing test / scaffolding
7 //==========================================================================
8 //####ECOSGPLCOPYRIGHTBEGIN####
9 // -------------------------------------------
10 // This file is part of eCos, the Embedded Configurable Operating System.
11 // Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
12 // Copyright (C) 2002 Jonathan Larmour
14 // eCos is free software; you can redistribute it and/or modify it under
15 // the terms of the GNU General Public License as published by the Free
16 // Software Foundation; either version 2 or (at your option) any later version.
18 // eCos is distributed in the hope that it will be useful, but WITHOUT ANY
19 // WARRANTY; without even the implied warranty of MERCHANTABILITY or
20 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
23 // You should have received a copy of the GNU General Public License along
24 // with eCos; if not, write to the Free Software Foundation, Inc.,
25 // 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
27 // As a special exception, if other files instantiate templates or use macros
28 // or inline functions from this file, or you compile this file and link it
29 // with other works to produce a work based on this file, this file does not
30 // by itself cause the resulting work to be covered by the GNU General Public
31 // License. However the source code for this file must still be made available
32 // in accordance with section (3) of the GNU General Public License.
34 // This exception does not invalidate any other reasons why a work based on
35 // this file might be covered by the GNU General Public License.
37 // Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
38 // at http://sources.redhat.com/ecos/ecos-license/
39 // -------------------------------------------
40 //####ECOSGPLCOPYRIGHTEND####
41 //==========================================================================
42 //#####DESCRIPTIONBEGIN####
44 // Author(s): gthomas,nickg
45 // Contributors: jlarmour
47 // Description: Very simple kernel timing test
48 //####DESCRIPTIONEND####
49 //==========================================================================
52 #include <cyg/infra/testcase.h>
53 #include <cyg/infra/diag.h>
54 #include <pkgconf/posix.h>
55 #include <pkgconf/system.h>
57 #include <pkgconf/kernel.h>
60 #ifndef CYGPKG_POSIX_SIGNALS
61 #define NA_MSG "No POSIX signals"
62 #elif !defined(CYGPKG_POSIX_TIMERS)
63 #define NA_MSG "No POSIX timers"
64 #elif !defined(CYGPKG_POSIX_PTHREAD)
65 #define NA_MSG "POSIX threads not enabled"
66 #elif !defined(CYGFUN_KERNEL_API_C)
67 #define NA_MSG "Kernel C API not enabled"
68 #elif !defined(CYGSEM_KERNEL_SCHED_MLQUEUE)
69 #define NA_MSG "Kernel mlqueue scheduler not enabled"
70 #elif !defined(CYGVAR_KERNEL_COUNTERS_CLOCK)
71 #define NA_MSG "Kernel clock not enabled"
72 #elif CYGNUM_KERNEL_SCHED_PRIORITIES <= 12
73 #define NA_MSG "Kernel scheduler properties <= 12"
74 #elif !defined(CYGPKG_POSIX_SEMAPHORES)
75 #define NA_MSG "POSIX semaphores not enabled"
78 //==========================================================================
89 #include <pkgconf/kernel.h>
90 #include <pkgconf/hal.h>
92 #include <cyg/kernel/sched.hxx>
93 #include <cyg/kernel/thread.hxx>
94 #include <cyg/kernel/thread.inl>
95 #include <cyg/kernel/mutex.hxx>
96 #include <cyg/kernel/sema.hxx>
97 #include <cyg/kernel/sched.inl>
98 #include <cyg/kernel/clock.hxx>
99 #include <cyg/kernel/clock.inl>
100 #include <cyg/kernel/kapi.h>
102 #include <cyg/infra/testcase.h>
104 #include <cyg/kernel/test/stackmon.h>
105 #include CYGHWR_MEMORY_LAYOUT_H
110 #include <sys/types.h>
112 #include <semaphore.h>
117 //==========================================================================
118 // Define this to see the statistics with the first sample datum removed.
119 // This can expose the effects of caches on the speed of operations.
121 #undef STATS_WITHOUT_FIRST_SAMPLE
123 //==========================================================================
125 // Structure used to keep track of times
126 typedef struct fun_times {
131 //==========================================================================
133 #define STACK_SIZE (PTHREAD_STACK_MIN*2)
136 #define NTEST_THREADS 16
139 #define NSEMAPHORES 32
144 #define NTHREAD_SWITCHES 128
147 #define NSAMPLES_SIM 2
148 #define NTEST_THREADS_SIM 2
149 #define NTHREAD_SWITCHES_SIM 4
150 #define NMUTEXES_SIM 2
151 #define NMBOXES_SIM 2
152 #define NSEMAPHORES_SIM 2
153 #define NSCHEDS_SIM 4
154 #define NTIMERS_SIM 2
156 //==========================================================================
159 static int ntest_threads;
160 static int nthread_switches;
161 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
165 static int nsemaphores;
169 static char stacks[NTEST_THREADS][STACK_SIZE];
170 static pthread_t threads[NTEST_THREADS];
172 static sem_t synchro;
173 static fun_times thread_ft[NTEST_THREADS];
175 static fun_times test2_ft[NTHREAD_SWITCHES];
176 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
177 static pthread_mutex_t test_mutexes[NMUTEXES];
178 static fun_times mutex_ft[NMUTEXES];
179 static pthread_t mutex_test_thread_handle;
182 static cyg_mbox test_mboxes[NMBOXES];
183 static cyg_handle_t test_mbox_handles[NMBOXES];
184 static fun_times mbox_ft[NMBOXES];
185 static cyg_thread mbox_test_thread;
186 static cyg_handle_t mbox_test_thread_handle;
189 static sem_t test_semaphores[NSEMAPHORES];
190 static fun_times semaphore_ft[NSEMAPHORES];
191 static pthread_t semaphore_test_thread_handle;
193 static fun_times sched_ft[NSCHEDS];
195 static timer_t timers[NTIMERS];
196 static fun_times timer_ft[NTIMERS];
198 static long rtc_resolution[] = CYGNUM_KERNEL_COUNTERS_RTC_RESOLUTION;
199 static long ns_per_system_clock;
201 #if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY)
202 // Data kept by kernel real time clock measuring clock interrupt latency
203 extern cyg_tick_count total_clock_latency, total_clock_interrupts;
204 extern cyg_int32 min_clock_latency, max_clock_latency;
205 extern bool measure_clock_latency;
208 #if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
209 extern cyg_tick_count total_clock_dsr_latency, total_clock_dsr_calls;
210 extern cyg_int32 min_clock_dsr_latency, max_clock_dsr_latency;
211 extern bool measure_clock_latency;
214 //==========================================================================
216 void run_sched_tests(void);
217 void run_thread_tests(void);
218 void run_thread_switch_test(void);
219 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
220 void run_mutex_tests(void);
221 void run_mutex_circuit_test(void);
223 void run_mbox_tests(void);
224 void run_mbox_circuit_test(void);
225 void run_semaphore_tests(void);
226 void run_semaphore_circuit_test(void);
227 void run_timer_tests(void);
229 //==========================================================================
// Return the larger of the two arguments.
// BUG FIX: the original expansion `(m > n ? n : m)` selected the SMALLER
// value (i.e. it computed min despite being named max), and the arguments
// were unparenthesized, risking precedence bugs with expression arguments.
// NOTE: arguments are still evaluated twice — do not pass expressions with
// side effects.
#define max(n,m) ((n) > (m) ? (n) : (m))
235 //==========================================================================
// Wait until a clock tick [real time clock] has passed. This should keep it
// from happening again during a measurement, thus minimizing any fluctuations
// NOTE(review): the enclosing function header and loop construct are not
// visible in this chunk; the visible code records the current tick and then
// polls until cyg_current_time() returns a different value.
cyg_tick_count_t tv0, tv1;
tv0 = cyg_current_time();
// Busy-wait: re-read the kernel tick counter until it advances.
tv1 = cyg_current_time();
if (tv1 != tv0) break;
249 //--------------------------------------------------------------------------
250 // Display a number of ticks as microseconds
251 // Note: for improved calculation significance, values are kept in ticks*1000
253 show_ticks_in_us(cyg_uint32 ticks)
256 ns = (ns_per_system_clock * (long long)ticks) / CYGNUM_KERNEL_COUNTERS_RTC_PERIOD;
257 ns += 5; // for rounding to .01us
258 diag_printf("%5d.%02d", (int)(ns/1000), (int)((ns%1000)/10));
261 //--------------------------------------------------------------------------
263 // If the kernel is instrumented to measure clock interrupt latency, these
264 // measurements can be drastically perturbed by printing via "diag_printf()"
265 // since that code may run with interrupts disabled for long periods.
267 // In order to get accurate/reasonable latency figures _for the kernel
// primitive functions being tested_, the kernel's latency measurements
269 // are suspended while the printing actually takes place.
271 // The measurements are reenabled after the printing, thus allowing for
272 // fair measurements of the kernel primitives, which are not distorted
273 // by the printing mechanisms.
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
// Suspend latency statistics gathering (used around diag_printf calls,
// which can run with interrupts off and distort the figures).
disable_clock_latency_measurement(void)
measure_clock_latency = false;
// Resume latency statistics gathering.
enable_clock_latency_measurement(void)
measure_clock_latency = true;
// Ensure that the measurements are reasonable (no startup anomalies)
reset_clock_latency_measurement(void)
disable_clock_latency_measurement();
total_clock_latency = 0;
total_clock_interrupts = 0;
min_clock_latency = 0x7FFFFFFF;
max_clock_latency = 0;
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
// Also reset the clock DSR statistics when that instrumentation exists.
total_clock_dsr_latency = 0;
total_clock_dsr_calls = 0;
min_clock_dsr_latency = 0x7FFFFFFF;
max_clock_dsr_latency = 0;
enable_clock_latency_measurement();
// No latency instrumentation configured: the hooks compile away to nothing.
#define disable_clock_latency_measurement()
#define enable_clock_latency_measurement()
#define reset_clock_latency_measurement()
314 //--------------------------------------------------------------------------
319 disable_clock_latency_measurement();
321 diag_printf(" Confidence\n");
322 diag_printf(" Ave Min Max Var Ave Min Function\n");
323 diag_printf(" ====== ====== ====== ====== ========== ========\n");
324 enable_clock_latency_measurement();
// Compute and print statistics (average, min, max, average deviation and
// confidence percentages) over one set of start/end clock samples.
// ignore_first drops sample 0 to hide cold-cache effects.
// NOTE(review): several loop/brace lines are missing from this chunk; the
// visible code makes three passes over the samples.
show_times_detail(fun_times ft[], int nsamples, char *title, bool ignore_first)
int i, delta, min, max, con_ave, con_min, ave_dev;
int start_sample, total_samples;
cyg_int32 total, ave;
total_samples = nsamples-1;
total_samples = nsamples;
// Pass 1: accumulate total, track min and max.
for (i = start_sample; i < nsamples; i++) {
if (ft[i].end < ft[i].start) {
// Clock wrapped around (timer tick)
delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
delta = ft[i].end - ft[i].start;
if (delta < 0) delta = 0;
if (delta < min) min = delta;
if (delta > max) max = delta;
ave = total / total_samples;
// Pass 2: accumulate the average absolute deviation from the mean.
for (i = start_sample; i < nsamples; i++) {
if (ft[i].end < ft[i].start) {
// Clock wrapped around (timer tick)
delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
delta = ft[i].end - ft[i].start;
if (delta < 0) delta = 0;
if (delta < 0) delta = -delta;
ave_dev /= total_samples;
// Pass 3: count samples within one average deviation of the mean (con_ave)
// and of the minimum (con_min), expressed below as percentages.
for (i = start_sample; i < nsamples; i++) {
if (ft[i].end < ft[i].start) {
// Clock wrapped around (timer tick)
delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
delta = ft[i].end - ft[i].start;
if (delta < 0) delta = 0;
if ((delta <= (ave+ave_dev)) && (delta >= (ave-ave_dev))) con_ave++;
if ((delta <= (min+ave_dev)) && (delta >= (min-ave_dev))) con_min++;
con_ave = (con_ave * 100) / total_samples;
con_min = (con_min * 100) / total_samples;
show_ticks_in_us(ave);
show_ticks_in_us(min);
show_ticks_in_us(max);
show_ticks_in_us(ave_dev);
disable_clock_latency_measurement();
diag_printf(" %3d%% %3d%%", con_ave, con_min);
diag_printf(" %s\n", title);
enable_clock_latency_measurement();
// Print one statistics row for a sample set; optionally repeat with the
// first (cold-cache) sample excluded when STATS_WITHOUT_FIRST_SAMPLE is set.
show_times(fun_times ft[], int nsamples, char *title)
show_times_detail(ft, nsamples, title, false);
#ifdef STATS_WITHOUT_FIRST_SAMPLE
show_times_detail(ft, nsamples, "", true);
412 //--------------------------------------------------------------------------
415 show_test_parameters(void)
417 disable_clock_latency_measurement();
418 diag_printf("\nTesting parameters:\n");
419 diag_printf(" Clock samples: %5d\n", nsamples);
420 diag_printf(" Threads: %5d\n", ntest_threads);
421 diag_printf(" Thread switches: %5d\n", nthread_switches);
422 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
423 diag_printf(" Mutexes: %5d\n", nmutexes);
425 diag_printf(" Mailboxes: %5d\n", nmboxes);
426 diag_printf(" Semaphores: %5d\n", nsemaphores);
427 diag_printf(" Scheduler operations: %5d\n", nscheds);
428 diag_printf(" Timers: %5d\n", ntimers);
430 enable_clock_latency_measurement();
434 end_of_test_group(void)
436 disable_clock_latency_measurement();
438 enable_clock_latency_measurement();
441 //--------------------------------------------------------------------------
442 // Compute a name for a thread
445 thread_name(char *basename, int indx) {
446 return "<<NULL>>"; // Not currently used
449 //--------------------------------------------------------------------------
450 // test0 - null test, just return
458 //--------------------------------------------------------------------------
459 // test3 - loop, yeilding repeatedly and checking for cancellation
467 pthread_testcancel();
473 //--------------------------------------------------------------------------
474 // test1 - empty test, simply exit. Last thread signals parent.
479 if ((cyg_uint32)indx == (cyg_uint32)(ntest_threads-1)) {
480 sem_post(&synchro); // Signal that last thread is dying
485 //--------------------------------------------------------------------------
486 // test2 - measure thread switch times
492 for (i = 0; i < nthread_switches; i++) {
493 if ((int)indx == 0) {
494 HAL_CLOCK_READ(&test2_ft[i].start);
496 HAL_CLOCK_READ(&test2_ft[i].end);
500 if ((int)indx == 1) {
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
//--------------------------------------------------------------------------
// Full-circuit mutex unlock/lock test
// Worker side: repeatedly hands the mutex back and forth with the master
// thread, timestamping the start of each round trip.
mutex_test(void * indx)
pthread_mutex_lock(&test_mutexes[0]);
for (i = 0; i < nmutexes; i++) {
wait_for_tick(); // Wait until the next clock tick to minimize aberations
HAL_CLOCK_READ(&mutex_ft[i].start);
pthread_mutex_unlock(&test_mutexes[0]);
pthread_mutex_lock(&test_mutexes[0]);
//--------------------------------------------------------------------------
// Full-circuit mbox put/get test
// Worker side: the received item doubles as the sample index; the loop ends
// when the final index arrives.
mbox_test(cyg_uint32 indx)
item = cyg_mbox_get(test_mbox_handles[0]);
HAL_CLOCK_READ(&mbox_ft[(int)item].end);
cyg_semaphore_post(&synchro);
} while ((int)item != (nmboxes-1));
//--------------------------------------------------------------------------
// Full-circuit semaphore post/wait test
// Worker side: records the end timestamp for each post made by the master.
semaphore_test(void * indx)
for (i = 0; i < nsemaphores; i++) {
sem_wait(&test_semaphores[0]);
HAL_CLOCK_READ(&semaphore_ft[i].end);
558 //--------------------------------------------------------------------------
560 // This set of tests is used to measure kernel primitives that deal with threads
564 run_thread_tests(void)
569 struct sched_param schedparam;
574 // Set my priority higher than any I plan to create
575 schedparam.sched_priority = 30;
576 pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
578 // Initiaize thread creation attributes
580 pthread_attr_init( &attr );
581 pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
582 pthread_attr_setschedpolicy( &attr, SCHED_RR );
583 schedparam.sched_priority = 10;
584 pthread_attr_setschedparam( &attr, &schedparam );
587 wait_for_tick(); // Wait until the next clock tick to minimize aberations
588 for (i = 0; i < ntest_threads; i++) {
589 HAL_CLOCK_READ(&thread_ft[i].start);
591 pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
592 pthread_attr_setstacksize( &attr, STACK_SIZE );
593 pthread_create( &threads[i],
599 HAL_CLOCK_READ(&thread_ft[i].end);
601 show_times(thread_ft, ntest_threads, "Create thread");
603 wait_for_tick(); // Wait until the next clock tick to minimize aberations
604 for (i = 0; i < ntest_threads; i++) {
605 HAL_CLOCK_READ(&thread_ft[i].start);
607 HAL_CLOCK_READ(&thread_ft[i].end);
609 show_times(thread_ft, ntest_threads, "Yield thread [all lower priority]");
611 wait_for_tick(); // Wait until the next clock tick to minimize aberations
612 for (i = 0; i < ntest_threads; i++) {
613 HAL_CLOCK_READ(&thread_ft[i].start);
615 schedparam.sched_priority = 11;
616 pthread_attr_setschedparam( &attr, &schedparam );
617 pthread_setschedparam(threads[i], SCHED_RR, &schedparam);
619 HAL_CLOCK_READ(&thread_ft[i].end);
621 show_times(thread_ft, ntest_threads, "Set priority");
623 wait_for_tick(); // Wait until the next clock tick to minimize aberations
624 for (i = 0; i < ntest_threads; i++) {
625 HAL_CLOCK_READ(&thread_ft[i].start);
626 pthread_getschedparam( threads[i], &policy, &schedparam );
627 HAL_CLOCK_READ(&thread_ft[i].end);
629 show_times(thread_ft, ntest_threads, "Get priority");
631 cyg_thread_delay(1); // Let the test threads run
633 wait_for_tick(); // Wait until the next clock tick to minimize aberations
634 for (i = 0; i < ntest_threads; i++) {
635 HAL_CLOCK_READ(&thread_ft[i].start);
636 pthread_join(threads[i], &retval);
637 HAL_CLOCK_READ(&thread_ft[i].end);
639 show_times(thread_ft, ntest_threads, "Join exited thread");
641 wait_for_tick(); // Wait until the next clock tick to minimize aberations
642 for (i = 0; i < ntest_threads; i++) {
643 HAL_CLOCK_READ(&thread_ft[i].start);
645 HAL_CLOCK_READ(&thread_ft[i].end);
647 show_times(thread_ft, ntest_threads, "Yield [no other] thread");
650 // Recreate the test set
652 schedparam.sched_priority = 10;
653 pthread_attr_setschedparam( &attr, &schedparam );
655 for (i = 0; i < ntest_threads; i++) {
656 pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
657 pthread_attr_setstacksize( &attr, STACK_SIZE );
658 pthread_create( &threads[i],
665 cyg_thread_delay(1); // Let the test threads run
667 wait_for_tick(); // Wait until the next clock tick to minimize aberations
668 for (i = 0; i < ntest_threads; i++) {
669 HAL_CLOCK_READ(&thread_ft[i].start);
670 pthread_cancel(threads[i]);
671 HAL_CLOCK_READ(&thread_ft[i].end);
673 show_times(thread_ft, ntest_threads, "Cancel [running] thread");
675 cyg_thread_delay(1); // Let the test threads do their cancellations
677 wait_for_tick(); // Wait until the next clock tick to minimize aberations
678 for (i = 0; i < ntest_threads; i++) {
679 HAL_CLOCK_READ(&thread_ft[i].start);
680 pthread_join(threads[i], &retval);
681 HAL_CLOCK_READ(&thread_ft[i].end);
683 show_times(thread_ft, ntest_threads, "Join [cancelled] thread");
686 // Set my priority lower than any I plan to create
687 schedparam.sched_priority = 5;
688 pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
690 // Set up the end-of-threads synchronizer
691 sem_init(&synchro, 0, 0);
693 schedparam.sched_priority = 10;
694 pthread_attr_setschedparam( &attr, &schedparam );
696 wait_for_tick(); // Wait until the next clock tick to minimize aberations
697 for (i = 0; i < ntest_threads; i++) {
698 HAL_CLOCK_READ(&thread_ft[i].start);
700 pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
701 pthread_attr_setstacksize( &attr, STACK_SIZE );
702 pthread_create( &threads[i],
708 HAL_CLOCK_READ(&thread_ft[i].end);
710 show_times(thread_ft, ntest_threads, "Create [high priority] thread");
712 sem_wait(&synchro); // Wait for all threads to finish
714 // Make sure they are all dead
715 for (i = 0; i < ntest_threads; i++) {
716 pthread_join(threads[i], &retval);
719 run_thread_switch_test();
724 //--------------------------------------------------------------------------
727 run_thread_switch_test(void)
731 struct sched_param schedparam;
735 // Set my priority higher than any I plan to create
736 schedparam.sched_priority = 30;
737 pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
739 // Initiaize thread creation attributes
741 pthread_attr_init( &attr );
742 pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
743 pthread_attr_setschedpolicy( &attr, SCHED_RR );
744 schedparam.sched_priority = 10;
745 pthread_attr_setschedparam( &attr, &schedparam );
747 // Set up the end-of-threads synchronizer
749 sem_init(&synchro, 0, 0);
751 // Set up for thread context switch
753 for (i = 0; i < 2; i++) {
754 pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
755 pthread_attr_setstacksize( &attr, STACK_SIZE );
756 pthread_create( &threads[i],
763 wait_for_tick(); // Wait until the next clock tick to minimize aberations
767 show_times(test2_ft, nthread_switches, "Thread switch");
770 for (i = 0; i < 2; i++) {
771 pthread_join(threads[i], &retval);
777 //--------------------------------------------------------------------------
778 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
780 run_mutex_tests(void)
784 pthread_mutexattr_t attr;
786 pthread_mutexattr_init( &attr );
789 wait_for_tick(); // Wait until the next clock tick to minimize aberations
790 for (i = 0; i < nmutexes; i++) {
791 HAL_CLOCK_READ(&mutex_ft[i].start);
792 pthread_mutex_init(&test_mutexes[i], &attr);
793 HAL_CLOCK_READ(&mutex_ft[i].end);
795 show_times(mutex_ft, nmutexes, "Init mutex");
798 wait_for_tick(); // Wait until the next clock tick to minimize aberations
799 for (i = 0; i < nmutexes; i++) {
800 HAL_CLOCK_READ(&mutex_ft[i].start);
801 pthread_mutex_lock(&test_mutexes[i]);
802 HAL_CLOCK_READ(&mutex_ft[i].end);
804 show_times(mutex_ft, nmutexes, "Lock [unlocked] mutex");
806 wait_for_tick(); // Wait until the next clock tick to minimize aberations
807 for (i = 0; i < nmutexes; i++) {
808 HAL_CLOCK_READ(&mutex_ft[i].start);
809 pthread_mutex_unlock(&test_mutexes[i]);
810 HAL_CLOCK_READ(&mutex_ft[i].end);
812 show_times(mutex_ft, nmutexes, "Unlock [locked] mutex");
814 wait_for_tick(); // Wait until the next clock tick to minimize aberations
815 for (i = 0; i < nmutexes; i++) {
816 HAL_CLOCK_READ(&mutex_ft[i].start);
817 pthread_mutex_trylock(&test_mutexes[i]);
818 HAL_CLOCK_READ(&mutex_ft[i].end);
820 show_times(mutex_ft, nmutexes, "Trylock [unlocked] mutex");
822 wait_for_tick(); // Wait until the next clock tick to minimize aberations
823 for (i = 0; i < nmutexes; i++) {
824 HAL_CLOCK_READ(&mutex_ft[i].start);
825 pthread_mutex_trylock(&test_mutexes[i]);
826 HAL_CLOCK_READ(&mutex_ft[i].end);
828 show_times(mutex_ft, nmutexes, "Trylock [locked] mutex");
830 // Must unlock mutices before destroying them.
831 for (i = 0; i < nmutexes; i++) {
832 pthread_mutex_unlock(&test_mutexes[i]);
835 wait_for_tick(); // Wait until the next clock tick to minimize aberations
836 for (i = 0; i < nmutexes; i++) {
837 HAL_CLOCK_READ(&mutex_ft[i].start);
838 pthread_mutex_destroy(&test_mutexes[i]);
839 HAL_CLOCK_READ(&mutex_ft[i].end);
841 show_times(mutex_ft, nmutexes, "Destroy mutex");
844 run_mutex_circuit_test();
848 //--------------------------------------------------------------------------
851 run_mutex_circuit_test(void)
854 pthread_mutexattr_t mattr;
855 struct sched_param schedparam;
859 // Set my priority lower than any I plan to create
860 schedparam.sched_priority = 5;
861 pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
863 // Initiaize thread creation attributes
865 pthread_attr_init( &attr );
866 pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
867 pthread_attr_setschedpolicy( &attr, SCHED_RR );
868 schedparam.sched_priority = 10;
869 pthread_attr_setschedparam( &attr, &schedparam );
871 // Set up for full mutex unlock/lock test
872 pthread_mutexattr_init( &mattr );
873 pthread_mutex_init(&test_mutexes[0], &mattr);
874 sem_init(&synchro, 0, 0);
876 pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
877 pthread_attr_setstacksize( &attr, STACK_SIZE );
878 pthread_create( &mutex_test_thread_handle,
884 // Need to raise priority so that this thread will block on the "lock"
885 schedparam.sched_priority = 20;
886 pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
888 for (i = 0; i < nmutexes; i++) {
890 pthread_mutex_lock(&test_mutexes[0]);
891 HAL_CLOCK_READ(&mutex_ft[i].end);
892 pthread_mutex_unlock(&test_mutexes[0]);
895 pthread_join(mutex_test_thread_handle, &retval);
896 show_times(mutex_ft, nmutexes, "Unlock/Lock mutex");
901 //--------------------------------------------------------------------------
902 // Message queue tests
904 // Currently disabled, pending implementation of POSIX message queues
912 // Mailbox primitives
913 wait_for_tick(); // Wait until the next clock tick to minimize aberations
914 for (i = 0; i < nmboxes; i++) {
915 HAL_CLOCK_READ(&mbox_ft[i].start);
916 cyg_mbox_create(&test_mbox_handles[i], &test_mboxes[i]);
917 HAL_CLOCK_READ(&mbox_ft[i].end);
919 show_times(mbox_ft, nmboxes, "Create mbox");
921 wait_for_tick(); // Wait until the next clock tick to minimize aberations
922 for (i = 0; i < nmboxes; i++) {
923 HAL_CLOCK_READ(&mbox_ft[i].start);
924 cnt = cyg_mbox_peek(test_mbox_handles[i]);
925 HAL_CLOCK_READ(&mbox_ft[i].end);
927 show_times(mbox_ft, nmboxes, "Peek [empty] mbox");
929 #ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
930 wait_for_tick(); // Wait until the next clock tick to minimize aberations
931 for (i = 0; i < nmboxes; i++) {
932 HAL_CLOCK_READ(&mbox_ft[i].start);
933 cyg_mbox_put(test_mbox_handles[i], (void *)i);
934 HAL_CLOCK_READ(&mbox_ft[i].end);
936 show_times(mbox_ft, nmboxes, "Put [first] mbox");
938 wait_for_tick(); // Wait until the next clock tick to minimize aberations
939 for (i = 0; i < nmboxes; i++) {
940 HAL_CLOCK_READ(&mbox_ft[i].start);
941 cnt = cyg_mbox_peek(test_mbox_handles[i]);
942 HAL_CLOCK_READ(&mbox_ft[i].end);
944 show_times(mbox_ft, nmboxes, "Peek [1 msg] mbox");
946 wait_for_tick(); // Wait until the next clock tick to minimize aberations
947 for (i = 0; i < nmboxes; i++) {
948 HAL_CLOCK_READ(&mbox_ft[i].start);
949 cyg_mbox_put(test_mbox_handles[i], (void *)i);
950 HAL_CLOCK_READ(&mbox_ft[i].end);
952 show_times(mbox_ft, nmboxes, "Put [second] mbox");
954 wait_for_tick(); // Wait until the next clock tick to minimize aberations
955 for (i = 0; i < nmboxes; i++) {
956 HAL_CLOCK_READ(&mbox_ft[i].start);
957 cnt = cyg_mbox_peek(test_mbox_handles[i]);
958 HAL_CLOCK_READ(&mbox_ft[i].end);
960 show_times(mbox_ft, nmboxes, "Peek [2 msgs] mbox");
962 wait_for_tick(); // Wait until the next clock tick to minimize aberations
963 for (i = 0; i < nmboxes; i++) {
964 HAL_CLOCK_READ(&mbox_ft[i].start);
965 item = cyg_mbox_get(test_mbox_handles[i]);
966 HAL_CLOCK_READ(&mbox_ft[i].end);
968 show_times(mbox_ft, nmboxes, "Get [first] mbox");
970 wait_for_tick(); // Wait until the next clock tick to minimize aberations
971 for (i = 0; i < nmboxes; i++) {
972 HAL_CLOCK_READ(&mbox_ft[i].start);
973 item = cyg_mbox_get(test_mbox_handles[i]);
974 HAL_CLOCK_READ(&mbox_ft[i].end);
976 show_times(mbox_ft, nmboxes, "Get [second] mbox");
977 #endif // ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
979 wait_for_tick(); // Wait until the next clock tick to minimize aberations
980 for (i = 0; i < nmboxes; i++) {
981 HAL_CLOCK_READ(&mbox_ft[i].start);
982 cyg_mbox_tryput(test_mbox_handles[i], (void *)i);
983 HAL_CLOCK_READ(&mbox_ft[i].end);
985 show_times(mbox_ft, nmboxes, "Tryput [first] mbox");
987 wait_for_tick(); // Wait until the next clock tick to minimize aberations
988 for (i = 0; i < nmboxes; i++) {
989 HAL_CLOCK_READ(&mbox_ft[i].start);
990 item = cyg_mbox_peek_item(test_mbox_handles[i]);
991 HAL_CLOCK_READ(&mbox_ft[i].end);
993 show_times(mbox_ft, nmboxes, "Peek item [non-empty] mbox");
995 wait_for_tick(); // Wait until the next clock tick to minimize aberations
996 for (i = 0; i < nmboxes; i++) {
997 HAL_CLOCK_READ(&mbox_ft[i].start);
998 item = cyg_mbox_tryget(test_mbox_handles[i]);
999 HAL_CLOCK_READ(&mbox_ft[i].end);
1001 show_times(mbox_ft, nmboxes, "Tryget [non-empty] mbox");
1003 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1004 for (i = 0; i < nmboxes; i++) {
1005 HAL_CLOCK_READ(&mbox_ft[i].start);
1006 item = cyg_mbox_peek_item(test_mbox_handles[i]);
1007 HAL_CLOCK_READ(&mbox_ft[i].end);
1009 show_times(mbox_ft, nmboxes, "Peek item [empty] mbox");
1011 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1012 for (i = 0; i < nmboxes; i++) {
1013 HAL_CLOCK_READ(&mbox_ft[i].start);
1014 item = cyg_mbox_tryget(test_mbox_handles[i]);
1015 HAL_CLOCK_READ(&mbox_ft[i].end);
1017 show_times(mbox_ft, nmboxes, "Tryget [empty] mbox");
1019 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1020 for (i = 0; i < nmboxes; i++) {
1021 HAL_CLOCK_READ(&mbox_ft[i].start);
1022 cyg_mbox_waiting_to_get(test_mbox_handles[i]);
1023 HAL_CLOCK_READ(&mbox_ft[i].end);
1025 show_times(mbox_ft, nmboxes, "Waiting to get mbox");
1027 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1028 for (i = 0; i < nmboxes; i++) {
1029 HAL_CLOCK_READ(&mbox_ft[i].start);
1030 cyg_mbox_waiting_to_put(test_mbox_handles[i]);
1031 HAL_CLOCK_READ(&mbox_ft[i].end);
1033 show_times(mbox_ft, nmboxes, "Waiting to put mbox");
1035 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1036 for (i = 0; i < nmboxes; i++) {
1037 HAL_CLOCK_READ(&mbox_ft[i].start);
1038 cyg_mbox_delete(test_mbox_handles[i]);
1039 HAL_CLOCK_READ(&mbox_ft[i].end);
1041 show_times(mbox_ft, nmboxes, "Delete mbox");
1043 run_mbox_circuit_test();
1044 end_of_test_group();
1047 //--------------------------------------------------------------------------
1050 run_mbox_circuit_test(void)
1052 #ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
1054 // Set my priority lower than any I plan to create
1055 cyg_thread_set_priority(cyg_thread_self(), 3);
1056 // Set up for full mbox put/get test
1057 cyg_mbox_create(&test_mbox_handles[0], &test_mboxes[0]);
1058 cyg_semaphore_init(&synchro, 0);
1059 cyg_thread_create(2, // Priority - just a number
1062 thread_name("thread", 0), // Name
1063 &stacks[0][0], // Stack
1065 &mbox_test_thread_handle, // Handle
1066 &mbox_test_thread // Thread data structure
1068 cyg_thread_resume(mbox_test_thread_handle);
1069 for (i = 0; i < nmboxes; i++) {
1070 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1071 HAL_CLOCK_READ(&mbox_ft[i].start);
1072 cyg_mbox_put(test_mbox_handles[0], (void *)i);
1073 cyg_semaphore_wait(&synchro);
1075 cyg_thread_delete(mbox_test_thread_handle);
1076 show_times(mbox_ft, nmboxes, "Put/Get mbox");
1082 //--------------------------------------------------------------------------
1085 run_semaphore_tests(void)
1091 // Semaphore primitives
1092 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1093 for (i = 0; i < nsemaphores; i++) {
1094 HAL_CLOCK_READ(&semaphore_ft[i].start);
1095 sem_init(&test_semaphores[i], 0, 0);
1096 HAL_CLOCK_READ(&semaphore_ft[i].end);
1098 show_times(semaphore_ft, nsemaphores, "Init semaphore");
1100 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1101 for (i = 0; i < nsemaphores; i++) {
1102 HAL_CLOCK_READ(&semaphore_ft[i].start);
1103 sem_post(&test_semaphores[i]);
1104 HAL_CLOCK_READ(&semaphore_ft[i].end);
1106 show_times(semaphore_ft, nsemaphores, "Post [0] semaphore");
1108 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1109 for (i = 0; i < nsemaphores; i++) {
1110 HAL_CLOCK_READ(&semaphore_ft[i].start);
1111 sem_wait(&test_semaphores[i]);
1112 HAL_CLOCK_READ(&semaphore_ft[i].end);
1114 show_times(semaphore_ft, nsemaphores, "Wait [1] semaphore");
1116 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1117 for (i = 0; i < nsemaphores; i++) {
1118 HAL_CLOCK_READ(&semaphore_ft[i].start);
1119 sem_trywait(&test_semaphores[i]);
1120 HAL_CLOCK_READ(&semaphore_ft[i].end);
1122 show_times(semaphore_ft, nsemaphores, "Trywait [0] semaphore");
1124 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1125 for (i = 0; i < nsemaphores; i++) {
1126 sem_post(&test_semaphores[i]);
1127 HAL_CLOCK_READ(&semaphore_ft[i].start);
1128 sem_trywait(&test_semaphores[i]);
1129 HAL_CLOCK_READ(&semaphore_ft[i].end);
1131 show_times(semaphore_ft, nsemaphores, "Trywait [1] semaphore");
1133 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1134 for (i = 0; i < nsemaphores; i++) {
1135 HAL_CLOCK_READ(&semaphore_ft[i].start);
1136 sem_getvalue(&test_semaphores[i], &sem_val);
1137 HAL_CLOCK_READ(&semaphore_ft[i].end);
1139 show_times(semaphore_ft, nsemaphores, "Get value of semaphore");
1141 wait_for_tick(); // Wait until the next clock tick to minimize aberations
1142 for (i = 0; i < nsemaphores; i++) {
1143 HAL_CLOCK_READ(&semaphore_ft[i].start);
1144 sem_destroy(&test_semaphores[i]);
1145 HAL_CLOCK_READ(&semaphore_ft[i].end);
1147 show_times(semaphore_ft, nsemaphores, "Destroy semaphore");
1149 run_semaphore_circuit_test();
1150 end_of_test_group();
1153 //--------------------------------------------------------------------------
// Measure the full post/wait round trip between two threads: this
// thread posts test_semaphores[0] (timestamping the post into
// semaphore_ft[i].start), while a higher-priority helper thread —
// created below — waits on it; the .end timestamps are presumably
// taken by that helper (its body is not visible here — verify).
// pthread_join() reaps the helper before the results are printed.
1156 run_semaphore_circuit_test(void)
1160 struct sched_param schedparam;
1161 pthread_attr_t attr;
1164 // Set my priority lower than any I plan to create
1165 schedparam.sched_priority = 5;
1166 pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
1168 // Initialize thread creation attributes
1170 pthread_attr_init( &attr );
1171 pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
1172 pthread_attr_setschedpolicy( &attr, SCHED_RR );
1173 schedparam.sched_priority = 10;
1174 pthread_attr_setschedparam( &attr, &schedparam );
1176 // Set up for full semaphore post/wait test
1177 sem_init(&test_semaphores[0], 0, 0);
1178 sem_init(&synchro, 0, 0);
1180 pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
1181 pthread_attr_setstacksize( &attr, STACK_SIZE );
// NOTE(review): the remaining pthread_create() arguments (attr, start
// routine, argument) are on lines not visible in this view.
1182 pthread_create( &semaphore_test_thread_handle,
1189 for (i = 0; i < nsemaphores; i++) {
1190 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1191 HAL_CLOCK_READ(&semaphore_ft[i].start);
1192 sem_post(&test_semaphores[0]);
1195 pthread_join(semaphore_test_thread_handle, &retval);
1197 show_times(semaphore_ft, nsemaphores, "Post/Wait semaphore");
1202 //--------------------------------------------------------------------------
1204 // Timer callback function
// Handler installed on SIGRTMIN; only announces delivery.  Used by the
// timer_create/timer_settime timing loops in run_timer_tests().
1206 sigrt0(int signo, siginfo_t *info, void *context)
1208 diag_printf("sigrt0 called\n");
1212 // Callback used to test determinism (timer latency)
1213 static volatile int timer_cnt; // index of next sched_ft[] sample; shared with handler threads
// Handler installed on SIGRTMIN+1: records one latency sample per timer
// expiry into sched_ft[] until nscheds samples have been gathered.
// The .start field is zeroed; show_times() presumably factors the
// actual expiry time in elsewhere — confirm against the full file.
1215 sigrt1(int signo, siginfo_t *info, void *context)
1217 if (timer_cnt == nscheds) return; // all samples taken; ignore further ticks
1218 sched_ft[timer_cnt].start = 0;
1219 HAL_CLOCK_READ(&sched_ft[timer_cnt++].end);
1220 if (timer_cnt == nscheds) {
1225 static sem_t timer_sem; // wakes the timer_test2() thread once per timer expiry
// Handler installed on SIGRTMIN+2: drives the "timer -> thread post
// latency" measurement by posting timer_sem on each expiry; once
// nscheds samples exist it posts one final time so the waiting thread
// can exit.  NOTE(review): the braces/else structure and any
// HAL_CLOCK_READ in this handler are on lines not visible here.
1228 sigrt2(int signo, siginfo_t *info, void *context)
1230 if (timer_cnt == nscheds) {
1232 sem_post(&timer_sem);
1234 sched_ft[timer_cnt].start = 0;
1235 sem_post(&timer_sem);
1239 // Null thread, used to keep scheduler busy
// Busy thread body: repeatedly offers a cancellation point so the main
// test can pthread_cancel() it.  (Loop construct is on lines not
// visible in this view.)
1241 timer_test(void * id)
1245 pthread_testcancel();
1251 // Thread that suspends itself at the first opportunity
// Partner of sigrt2(): each time the timer signal posts timer_sem this
// thread wakes, stamps the wakeup time into sched_ft[timer_cnt].end,
// and blocks again — measuring timer-to-thread post latency.
1253 timer_test2(void *id)
1255 while (timer_cnt != nscheds) {
1256 HAL_CLOCK_READ(&sched_ft[timer_cnt++].end);
1257 sem_wait(&timer_sem);
// Time the POSIX timer API (timer_create / timer_settime /
// timer_delete) and then measure timer-expiry latency with 0 threads,
// 2 threads, "many" threads, and finally timer->thread post latency
// via sigrt2()/timer_test2().  Results accumulate in timer_ft[] /
// sched_ft[] and are printed by show_times().
// NOTE(review): many loop-closing braces, `do` keywords and
// pthread_create() argument lists are on lines not visible here.
1263 run_timer_tests(void)
1267 struct sigaction sa;
1268 struct sigevent sigev;
1269 struct itimerspec tp;
1271 // Install signal handlers
1272 sigemptyset( &sa.sa_mask );
1273 sa.sa_flags = SA_SIGINFO;
1275 sa.sa_sigaction = sigrt0;
1276 sigaction( SIGRTMIN, &sa, NULL );
1278 sa.sa_sigaction = sigrt1;
1279 sigaction( SIGRTMIN+1, &sa, NULL );
1281 sa.sa_sigaction = sigrt2;
1282 sigaction( SIGRTMIN+2, &sa, NULL );
1284 // Set up common bits of sigevent
1286 sigev.sigev_notify = SIGEV_SIGNAL;
// -- cost of timer_create() --------------------------------------------
1288 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1289 for (i = 0; i < ntimers; i++) {
1290 HAL_CLOCK_READ(&timer_ft[i].start);
1291 sigev.sigev_signo = SIGRTMIN;
1292 sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1293 res = timer_create( CLOCK_REALTIME, &sigev, &timers[i]);
1294 HAL_CLOCK_READ(&timer_ft[i].end);
1295 CYG_ASSERT( res == 0 , "timer_create() returned error");
1297 show_times(timer_ft, ntimers, "Create timer");
// -- cost of timer_settime() with a zero (disarming) value -------------
1300 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1301 tp.it_value.tv_sec = 0;
1302 tp.it_value.tv_nsec = 0;
1303 tp.it_interval.tv_sec = 0;
1304 tp.it_interval.tv_nsec = 0;
1305 for (i = 0; i < ntimers; i++) {
1306 HAL_CLOCK_READ(&timer_ft[i].start);
1307 res = timer_settime( timers[i], 0, &tp, NULL );
1308 HAL_CLOCK_READ(&timer_ft[i].end);
1309 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1311 show_times(timer_ft, ntimers, "Initialize timer to zero");
// -- cost of timer_settime() with a real (1.25 s) value ----------------
1313 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1314 tp.it_value.tv_sec = 1;
1315 tp.it_value.tv_nsec = 250000000;
1316 tp.it_interval.tv_sec = 0;
1317 tp.it_interval.tv_nsec = 0;
1318 for (i = 0; i < ntimers; i++) {
1319 HAL_CLOCK_READ(&timer_ft[i].start);
1320 res = timer_settime( timers[i], 0, &tp, NULL );
1321 HAL_CLOCK_READ(&timer_ft[i].end);
1322 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1324 show_times(timer_ft, ntimers, "Initialize timer to 1.25 sec");
// -- cost of disarming an armed timer ----------------------------------
1326 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1327 tp.it_value.tv_sec = 0;
1328 tp.it_value.tv_nsec = 0;
1329 tp.it_interval.tv_sec = 0;
1330 tp.it_interval.tv_nsec = 0;
1331 for (i = 0; i < ntimers; i++) {
1332 HAL_CLOCK_READ(&timer_ft[i].start);
1333 res = timer_settime( timers[i], 0, &tp, NULL );
1334 HAL_CLOCK_READ(&timer_ft[i].end);
1335 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1337 show_times(timer_ft, ntimers, "Disable timer");
// -- cost of timer_delete() --------------------------------------------
1339 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1340 for (i = 0; i < ntimers; i++) {
1341 HAL_CLOCK_READ(&timer_ft[i].start);
1342 res = timer_delete( timers[i] );
1343 HAL_CLOCK_READ(&timer_ft[i].end);
// NOTE(review): copy/paste bug — this assert guards timer_delete() but
// the message says "timer_settime()".
1344 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1346 show_times(timer_ft, ntimers, "Delete timer");
// -- timer latency, no competing threads -------------------------------
// A 50 ms periodic timer fires SIGRTMIN+1; sigrt1() collects nscheds
// samples and (presumably) posts &synchro when done — confirm against
// the lines missing after 1220.
1350 sigev.sigev_signo = SIGRTMIN+1;
// NOTE(review): `i` here is the stale index left over from the delete
// loop above (== ntimers), so &timers[i] is one past the array —
// harmless as an opaque cookie, but worth fixing.
1351 sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1352 res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1353 CYG_ASSERT( res == 0 , "timer_create() returned error");
1354 tp.it_value.tv_sec = 0;
1355 tp.it_value.tv_nsec = 50000000;
1356 tp.it_interval.tv_sec = 0;
// NOTE(review): stray double semicolon.
1357 tp.it_interval.tv_nsec = 50000000;;
1359 res = timer_settime( timers[0], 0, &tp, NULL );
1360 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1361 sem_init(&synchro, 0, 0);
1362 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
// Retry the wait if it is interrupted by one of our timer signals.
1364 { res = sem_wait(&synchro);
1365 } while( res == -1 && errno == EINTR );
1366 CYG_ASSERT( res == 0 , "sem_wait() returned error");
1367 tp.it_value.tv_sec = 0;
1368 tp.it_value.tv_nsec = 0;
1369 tp.it_interval.tv_sec = 0;
1370 tp.it_interval.tv_nsec = 0;
1371 res = timer_settime( timers[0], 0, &tp, NULL );
1372 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1373 res = timer_delete( timers[0] );
1374 CYG_ASSERT( res == 0 , "timer_delete() returned error");
1375 show_times(sched_ft, nscheds, "Timer latency [0 threads]");
// -- timer latency with 2 busy timer_test() threads competing ----------
1380 struct sched_param schedparam;
1381 pthread_attr_t attr;
1384 // Set my priority higher than any I plan to create
1385 schedparam.sched_priority = 20;
1386 pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
1389 // Initialize thread creation attributes
1391 pthread_attr_init( &attr );
1392 pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
1393 pthread_attr_setschedpolicy( &attr, SCHED_RR );
1394 schedparam.sched_priority = 10;
1395 pthread_attr_setschedparam( &attr, &schedparam );
1397 for (i = 0; i < 2; i++) {
1398 pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
1399 pthread_attr_setstacksize( &attr, STACK_SIZE );
1400 res = pthread_create( &threads[i],
1405 CYG_ASSERT( res == 0 , "pthread_create() returned error");
1408 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1410 sigev.sigev_signo = SIGRTMIN+1;
// NOTE(review): stale `i` again (== 2 here); see note at line 1351.
1411 sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1412 res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1413 CYG_ASSERT( res == 0 , "timer_create() returned error");
1414 tp.it_value.tv_sec = 0;
1415 tp.it_value.tv_nsec = 50000000;
1416 tp.it_interval.tv_sec = 0;
1417 tp.it_interval.tv_nsec = 50000000;;
1419 res = timer_settime( timers[0], 0, &tp, NULL );
1420 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1422 sem_init(&synchro, 0, 0);
1424 { res = sem_wait(&synchro);
1425 } while( res == -1 && errno == EINTR );
1426 CYG_ASSERT( res == 0 , "sem_wait() returned error");
1427 res = timer_delete(timers[0]);
// NOTE(review): message typo — should read "timer_delete()".
1428 CYG_ASSERT( res == 0 , "timerdelete() returned error");
1429 show_times(sched_ft, nscheds, "Timer latency [2 threads]");
1430 for (i = 0; i < 2; i++) {
1431 pthread_cancel(threads[i]);
1432 pthread_join(threads[i], &retval);
// -- timer latency with ntest_threads busy threads ---------------------
1437 for (i = 0; i < ntest_threads; i++) {
1438 pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
1439 pthread_attr_setstacksize( &attr, STACK_SIZE );
1440 res = pthread_create( &threads[i],
1445 CYG_ASSERT( res == 0 , "pthread_create() returned error");
1447 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1448 sigev.sigev_signo = SIGRTMIN+1;
1449 sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1450 res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1451 CYG_ASSERT( res == 0 , "timer_create() returned error");
1452 tp.it_value.tv_sec = 0;
1453 tp.it_value.tv_nsec = 50000000;
1454 tp.it_interval.tv_sec = 0;
1455 tp.it_interval.tv_nsec = 50000000;;
1457 res = timer_settime( timers[0], 0, &tp, NULL );
1458 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1460 sem_init(&synchro, 0, 0);
1462 { res = sem_wait(&synchro);
1463 } while( res == -1 && errno == EINTR );
1464 CYG_ASSERT( res == 0 , "sem_wait() returned error");
1465 res = timer_delete(timers[0]);
1466 CYG_ASSERT( res == 0 , "timerdelete() returned error");
1467 show_times(sched_ft, nscheds, "Timer latency [many threads]");
1468 for (i = 0; i < ntest_threads; i++) {
1469 pthread_cancel(threads[i]);
1470 pthread_join(threads[i], &retval);
// -- timer -> thread post latency (sigrt2 + timer_test2) ---------------
1473 sem_init(&synchro, 0, 0);
1474 sem_init(&timer_sem, 0, 0);
1475 pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
1476 pthread_attr_setstacksize( &attr, STACK_SIZE );
1477 res = pthread_create( &threads[0],
1482 CYG_ASSERT( res == 0 , "pthread_create() returned error");
1484 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1485 sigev.sigev_signo = SIGRTMIN+2;
1486 sigev.sigev_value.sival_ptr = (void*)(threads[0]);
1487 res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1488 CYG_ASSERT( res == 0 , "timer_create() returned error");
1489 tp.it_value.tv_sec = 0;
1490 tp.it_value.tv_nsec = 50000000;
1491 tp.it_interval.tv_sec = 0;
1492 tp.it_interval.tv_nsec = 50000000;;
1494 res = timer_settime( timers[0], 0, &tp, NULL );
1495 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1498 { res = sem_wait(&synchro);
1499 } while( res == -1 && errno == EINTR );
1500 CYG_ASSERT( res == 0 , "sem_wait() returned error");
1501 res = timer_delete(timers[0]);
1502 CYG_ASSERT( res == 0 , "timerdelete() returned error");
1503 show_times(sched_ft, nscheds, "Timer -> thread post latency");
// Let timer_test2() out of its final sem_wait() so it can terminate.
1504 sem_post(&timer_sem);
1505 // pthread_cancel(threads[0]);
1506 pthread_join(threads[0], &retval);
1509 end_of_test_group();
1513 //--------------------------------------------------------------------------
// Top-level measurement driver (function header is on a line not
// visible here — presumably `run_all_tests`).  Calibrates the cost of
// reading the hardware clock, estimates clock-interrupt overhead, runs
// the individual test groups, then prints scheduler-lock, clock/DSR
// latency and stack-usage statistics before finishing the test.
1519 cyg_uint32 tv[nsamples], tv0, tv1;
1520 // cyg_uint32 min_stack, max_stack, total_stack, actual_stack, j;
1521 cyg_tick_count_t ticks, tick0, tick1;
1522 #ifdef CYG_SCHEDULER_LOCK_TIMINGS
1523 cyg_uint32 lock_ave, lock_max;
1525 #if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
1526 cyg_int32 clock_ave;
1529 disable_clock_latency_measurement();
1531 // cyg_test_dump_thread_stack_stats( "Startup, main stack", thread[0] );
1532 cyg_test_dump_interrupt_stack_stats( "Startup" );
1533 cyg_test_dump_idlethread_stack_stats( "Startup" );
1534 cyg_test_clear_interrupt_stack();
1536 diag_printf("\neCos Kernel Timings\n");
1537 diag_printf("Notes: all times are in microseconds (.000001) unless otherwise stated\n");
1538 #ifdef STATS_WITHOUT_FIRST_SAMPLE
1539 diag_printf(" second line of results have first sample removed\n");
1542 cyg_thread_delay(2); // Make sure the clock is actually running
1544 ns_per_system_clock = 1000000/rtc_resolution[1];
// Calibrate: average back-to-back HAL_CLOCK_READ() deltas to get the
// per-read overhead, which is subtracted from all later measurements.
1546 for (i = 0; i < nsamples; i++) {
1547 HAL_CLOCK_READ(&tv[i]);
1550 for (i = 1; i < nsamples; i++) {
1551 tv0 += tv[i] - tv[i-1];
1553 end_of_test_group();
1555 overhead = tv0 / (nsamples-1);
1556 diag_printf("Reading the hardware clock takes %d 'ticks' overhead\n", overhead);
1557 diag_printf("... this value will be factored out of all other measurements\n");
1559 // Try and measure how long the clock interrupt handling takes
// Spin until the system tick advances, then sample the hardware clock
// just after the tick; the average (x1000, minus overhead) estimates
// the clock-interrupt processing time.
1560 for (i = 0; i < nsamples; i++) {
1561 tick0 = cyg_current_time();
1563 tick1 = cyg_current_time();
1564 if (tick0 != tick1) break;
1566 HAL_CLOCK_READ(&tv[i]);
1569 for (i = 0; i < nsamples; i++) {
1570 tv1 += tv[i] * 1000;
1572 tv1 = tv1 / nsamples;
1573 tv1 -= overhead; // Adjust out the cost of getting the timer value
1574 diag_printf("Clock interrupt took");
1575 show_ticks_in_us(tv1);
1576 diag_printf(" microseconds (%d raw clock ticks)\n", tv1/1000);
1577 enable_clock_latency_measurement();
1579 ticks = cyg_current_time();
1581 show_test_parameters();
1584 reset_clock_latency_measurement();
// Run the individual test groups (mbox tests currently disabled).
1587 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
1589 // run_mbox_tests();
1591 run_semaphore_tests();
1594 #ifdef CYG_SCHEDULER_LOCK_TIMINGS
1595 Cyg_Scheduler::get_lock_times(&lock_ave, &lock_max);
1596 diag_printf("\nMax lock:");
1597 show_ticks_in_us(lock_max);
1598 diag_printf(", Ave lock:");
1599 show_ticks_in_us(lock_ave);
1603 #if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
1604 // Display latency figures in same format as all other numbers
1605 disable_clock_latency_measurement();
1606 clock_ave = (total_clock_latency*1000) / total_clock_interrupts;
1607 show_ticks_in_us(clock_ave);
1608 show_ticks_in_us(min_clock_latency*1000);
1609 show_ticks_in_us(max_clock_latency*1000);
1610 show_ticks_in_us(0);
1611 diag_printf(" Clock/interrupt latency\n\n");
1612 enable_clock_latency_measurement();
1615 #if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
1616 disable_clock_latency_measurement();
1617 clock_ave = (total_clock_dsr_latency*1000) / total_clock_dsr_calls;
1618 show_ticks_in_us(clock_ave);
1619 show_ticks_in_us(min_clock_dsr_latency*1000);
1620 show_ticks_in_us(max_clock_dsr_latency*1000);
1621 show_ticks_in_us(0);
1622 diag_printf(" Clock DSR latency\n\n");
1623 enable_clock_latency_measurement();
// Stack usage: scan each thread stack from the bottom for the first
// non-zero byte to estimate high-water marks.
1627 disable_clock_latency_measurement();
1628 min_stack = STACK_SIZE;
1631 for (i = 0; i < (int)NTEST_THREADS; i++) {
1632 for (j = 0; j < STACK_SIZE; j++) {
1633 if (stacks[i][j]) break;
1635 actual_stack = STACK_SIZE-j;
1636 if (actual_stack < min_stack) min_stack = actual_stack;
1637 if (actual_stack > max_stack) max_stack = actual_stack;
1638 total_stack += actual_stack;
// NOTE(review): STACKSIZE vs STACK_SIZE — two distinct macros appear
// to be in use (main stack vs test-thread stacks); confirm both are
// defined as intended in the part of the file not visible here.
1640 for (j = 0; j < STACKSIZE; j++) {
1641 if (((char *)stack[0])[j]) break;
1643 diag_printf("%5d %5d %5d (main stack: %5d) Thread stack used (%d total)\n",
1644 total_stack/NTEST_THREADS, min_stack, max_stack,
1645 STACKSIZE - j, STACK_SIZE);
1648 // cyg_test_dump_thread_stack_stats( "All done, main stack", thread[0] );
1649 cyg_test_dump_interrupt_stack_stats( "All done" );
1650 cyg_test_dump_idlethread_stack_stats( "All done" );
1652 enable_clock_latency_measurement();
1654 ticks = cyg_current_time();
1655 diag_printf("\nTiming complete - %d ms total\n\n", (int)((ticks*ns_per_system_clock)/1000));
1657 CYG_TEST_PASS_FINISH("Basic timing OK");
// Entry point: choose how many objects of each kind to measure.
// Simulators get the reduced *_SIM counts; real hardware gets the full
// counts; WORKHORSE_TEST inflates them further.  (The else branch and
// closing braces fall on lines not visible in this view.)
1660 int main( int argc, char **argv )
1664 if (cyg_test_is_simulator) {
1665 nsamples = NSAMPLES_SIM;
1666 ntest_threads = NTEST_THREADS_SIM;
1667 nthread_switches = NTHREAD_SWITCHES_SIM;
1668 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
1669 nmutexes = NMUTEXES_SIM;
1671 nmboxes = NMBOXES_SIM;
1672 nsemaphores = NSEMAPHORES_SIM;
1673 nscheds = NSCHEDS_SIM;
1674 ntimers = NTIMERS_SIM;
// Hardware (non-simulator) counts.
1676 nsamples = NSAMPLES;
1677 ntest_threads = NTEST_THREADS;
1678 nthread_switches = NTHREAD_SWITCHES;
1679 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
1680 nmutexes = NMUTEXES;
1683 nsemaphores = NSEMAPHORES;
// Optional stress configuration: raise the object counts to at least
// the values below.
1689 #ifdef WORKHORSE_TEST
1690 ntest_threads = max(512, ntest_threads);
1691 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
1692 nmutexes = max(1024, nmutexes);
1694 nsemaphores = max(1024, nsemaphores);
1695 nmboxes = max(1024, nmboxes);
// NOTE(review): ncounters is only adjusted here; its baseline
// assignment is not visible in this view — confirm it is initialized.
1696 ncounters = max(1024, ncounters);
1697 ntimers = max(1024, ntimers);
1699 ntest_threads = max(64, ntest_threads);
1700 #ifdef CYGPKG_POSIX_PTHREAD_MUTEX
1701 nmutexes = max(32, nmutexes);
1703 nsemaphores = max(32, nsemaphores);
1704 nmboxes = max(32, nmboxes);
1705 ntimers = max(32, ntimers);
1712 #endif // CYGFUN_KERNEL_API_C, etc.