1 //==========================================================================
5 // Basic timing test / scaffolding
7 //==========================================================================
8 //####ECOSGPLCOPYRIGHTBEGIN####
9 // -------------------------------------------
10 // This file is part of eCos, the Embedded Configurable Operating System.
11 // Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
12 // Copyright (C) 2002 Jonathan Larmour
14 // eCos is free software; you can redistribute it and/or modify it under
15 // the terms of the GNU General Public License as published by the Free
16 // Software Foundation; either version 2 or (at your option) any later version.
18 // eCos is distributed in the hope that it will be useful, but WITHOUT ANY
19 // WARRANTY; without even the implied warranty of MERCHANTABILITY or
20 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
23 // You should have received a copy of the GNU General Public License along
24 // with eCos; if not, write to the Free Software Foundation, Inc.,
25 // 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
27 // As a special exception, if other files instantiate templates or use macros
28 // or inline functions from this file, or you compile this file and link it
29 // with other works to produce a work based on this file, this file does not
30 // by itself cause the resulting work to be covered by the GNU General Public
31 // License. However the source code for this file must still be made available
32 // in accordance with section (3) of the GNU General Public License.
34 // This exception does not invalidate any other reasons why a work based on
35 // this file might be covered by the GNU General Public License.
37 // Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
38 // at http://sources.redhat.com/ecos/ecos-license/
39 // -------------------------------------------
40 //####ECOSGPLCOPYRIGHTEND####
41 //==========================================================================
42 //#####DESCRIPTIONBEGIN####
44 // Author(s): gthomas,nickg
45 // Contributors: jlarmour
47 // Description: Very simple kernel timing test
48 //####DESCRIPTIONEND####
49 //==========================================================================
52 #include <cyg/infra/testcase.h>
53 #include <cyg/infra/diag.h>
54 #include <pkgconf/posix.h>
55 #include <pkgconf/system.h>
57 #include <pkgconf/kernel.h>
60 #ifndef CYGPKG_POSIX_SIGNALS
61 #define NA_MSG "No POSIX signals"
62 #elif !defined(CYGPKG_POSIX_TIMERS)
63 #define NA_MSG "No POSIX timers"
64 #elif !defined(CYGPKG_POSIX_PTHREAD)
65 #define NA_MSG "POSIX threads not enabled"
66 #elif !defined(CYGFUN_KERNEL_API_C)
67 #define NA_MSG "Kernel C API not enabled"
68 #elif !defined(CYGSEM_KERNEL_SCHED_MLQUEUE)
69 #define NA_MSG "Kernel mlqueue scheduler not enabled"
70 #elif !defined(CYGVAR_KERNEL_COUNTERS_CLOCK)
71 #define NA_MSG "Kernel clock not enabled"
72 #elif CYGNUM_KERNEL_SCHED_PRIORITIES <= 12
73 #define NA_MSG "Kernel scheduler properties <= 12"
76 //==========================================================================
87 #include <pkgconf/kernel.h>
88 #include <pkgconf/hal.h>
90 #include <cyg/kernel/sched.hxx>
91 #include <cyg/kernel/thread.hxx>
92 #include <cyg/kernel/thread.inl>
93 #include <cyg/kernel/mutex.hxx>
94 #include <cyg/kernel/sema.hxx>
95 #include <cyg/kernel/sched.inl>
96 #include <cyg/kernel/clock.hxx>
97 #include <cyg/kernel/clock.inl>
98 #include <cyg/kernel/kapi.h>
100 #include <cyg/infra/testcase.h>
102 #include <cyg/kernel/test/stackmon.h>
103 #include CYGHWR_MEMORY_LAYOUT_H
108 #include <sys/types.h>
110 #include <semaphore.h>
115 //==========================================================================
116 // Define this to see the statistics with the first sample datum removed.
117 // This can expose the effects of caches on the speed of operations.
119 #undef STATS_WITHOUT_FIRST_SAMPLE
121 //==========================================================================
123 // Structure used to keep track of times
124 typedef struct fun_times {
129 //==========================================================================
// Test sizing parameters.
#define STACK_SIZE (PTHREAD_STACK_MIN*2)
#define NTEST_THREADS 16
#define NSEMAPHORES 32
#define NTHREAD_SWITCHES 128
// Reduced iteration counts — the _SIM suffix presumably selects these
// when running under a (slow) simulator; TODO confirm against the
// elided configuration code.
#define NSAMPLES_SIM 2
#define NTEST_THREADS_SIM 2
#define NTHREAD_SWITCHES_SIM 4
#define NMUTEXES_SIM 2
#define NMBOXES_SIM 2
#define NSEMAPHORES_SIM 2
#define NSCHEDS_SIM 4
#define NTIMERS_SIM 2
154 //==========================================================================
// Run-time copies of the iteration counts (set elsewhere; may differ
// from the compile-time maxima above).
static int ntest_threads;
static int nthread_switches;
static int nsemaphores;
// Per-test working storage; each fun_times array holds start/end clock
// samples for one measured operation per element.
static char stacks[NTEST_THREADS][STACK_SIZE];
static pthread_t threads[NTEST_THREADS];
static sem_t synchro;                    // parent/child rendezvous semaphore
static fun_times thread_ft[NTEST_THREADS];
static fun_times test2_ft[NTHREAD_SWITCHES];
static pthread_mutex_t test_mutexes[NMUTEXES];
static fun_times mutex_ft[NMUTEXES];
static pthread_t mutex_test_thread_handle;
// Mailbox tests use the native eCos kernel C API (no POSIX queues here).
static cyg_mbox test_mboxes[NMBOXES];
static cyg_handle_t test_mbox_handles[NMBOXES];
static fun_times mbox_ft[NMBOXES];
static cyg_thread mbox_test_thread;
static cyg_handle_t mbox_test_thread_handle;
static sem_t test_semaphores[NSEMAPHORES];
static fun_times semaphore_ft[NSEMAPHORES];
static pthread_t semaphore_test_thread_handle;
static fun_times sched_ft[NSCHEDS];
static timer_t timers[NTIMERS];
static fun_times timer_ft[NTIMERS];
static long rtc_resolution[] = CYGNUM_KERNEL_COUNTERS_RTC_RESOLUTION;
static long ns_per_system_clock;
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY)
// Data kept by kernel real time clock measuring clock interrupt latency
extern cyg_tick_count total_clock_latency, total_clock_interrupts;
extern cyg_int32 min_clock_latency, max_clock_latency;
extern bool measure_clock_latency;
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
extern cyg_tick_count total_clock_dsr_latency, total_clock_dsr_calls;
extern cyg_int32 min_clock_dsr_latency, max_clock_dsr_latency;
extern bool measure_clock_latency;
210 //==========================================================================
// Forward declarations of the individual test groups run by this test.
void run_sched_tests(void);
void run_thread_tests(void);
void run_thread_switch_test(void);
void run_mutex_tests(void);
void run_mutex_circuit_test(void);
void run_mbox_tests(void);
void run_mbox_circuit_test(void);
void run_semaphore_tests(void);
void run_semaphore_circuit_test(void);
void run_timer_tests(void);
223 //==========================================================================
226 #define max(n,m) (m > n ? n : m)
229 //==========================================================================
230 // Wait until a clock tick [real time clock] has passed. This should keep it
231 // from happening again during a measurement, thus minimizing any fluctuations
// Body fragment of wait_for_tick() — the function header and loop
// construct are not visible in this chunk. Spins until the kernel tick
// counter changes, so a measurement starts just after a tick and is
// unlikely to be disturbed by the next tick interrupt.
    cyg_tick_count_t tv0, tv1;
    tv0 = cyg_current_time();
    // NOTE(review): an enclosing loop around the re-read/compare below
    // is implied by the `break` but elided from this view.
    tv1 = cyg_current_time();
    if (tv1 != tv0) break;
243 //--------------------------------------------------------------------------
244 // Display a number of ticks as microseconds
245 // Note: for improved calculation significance, values are kept in ticks*1000
// Print a tick count as microseconds in a fixed "%5d.%02d" field.
// Values arrive scaled by 1000 (ticks*1000) for extra significance;
// see the comment preceding this function.
// NOTE(review): the declaration of `ns` (presumably long long) is on a
// line elided from this view.
show_ticks_in_us(cyg_uint32 ticks)
    ns = (ns_per_system_clock * (long long)ticks) / CYGNUM_KERNEL_COUNTERS_RTC_PERIOD;
    ns += 5; // round half-up so the displayed .01us digit is correct
    diag_printf("%5d.%02d", (int)(ns/1000), (int)((ns%1000)/10));
255 //--------------------------------------------------------------------------
257 // If the kernel is instrumented to measure clock interrupt latency, these
258 // measurements can be drastically perturbed by printing via "diag_printf()"
259 // since that code may run with interrupts disabled for long periods.
261 // In order to get accurate/reasonable latency figures _for the kernel
// primitive functions being tested_, the kernel's latency measurements
263 // are suspended while the printing actually takes place.
265 // The measurements are reenabled after the printing, thus allowing for
266 // fair measurements of the kernel primitives, which are not distorted
267 // by the printing mechanisms.
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
// Suspend the kernel's clock-interrupt latency statistics, e.g. while
// printing, which may run with interrupts disabled and distort them.
disable_clock_latency_measurement(void)
    measure_clock_latency = false;
// Resume clock-interrupt latency statistics collection.
enable_clock_latency_measurement(void)
    measure_clock_latency = true;
// Ensure that the measurements are reasonable (no startup anomalies)
reset_clock_latency_measurement(void)
    disable_clock_latency_measurement();
    total_clock_latency = 0;
    total_clock_interrupts = 0;
    min_clock_latency = 0x7FFFFFFF;   // "no sample yet" sentinel (INT32 max)
    max_clock_latency = 0;
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
    total_clock_dsr_latency = 0;
    total_clock_dsr_calls = 0;
    min_clock_dsr_latency = 0x7FFFFFFF;
    max_clock_dsr_latency = 0;
    enable_clock_latency_measurement();
// Latency instrumentation not configured: compile the calls away.
// NOTE(review): the matching #else/#endif lines are elided from this view.
#define disable_clock_latency_measurement()
#define enable_clock_latency_measurement()
#define reset_clock_latency_measurement()
308 //--------------------------------------------------------------------------
// Fragment of the results-table header printer (function header not
// visible here). Latency measurement is paused while printing so the
// diag_printf() calls do not pollute the kernel's latency figures.
    disable_clock_latency_measurement();
    diag_printf(" Confidence\n");
    diag_printf(" Ave Min Max Var Ave Min Function\n");
    diag_printf(" ====== ====== ====== ====== ========== ========\n");
    enable_clock_latency_measurement();
// Compute and print statistics (average, min, max, average deviation,
// and "confidence" percentages) for one set of start/end samples.
// Three passes over the samples: (1) mean/min/max, (2) mean absolute
// deviation, (3) count of samples within one deviation of the mean/min.
// ignore_first drops sample 0 to expose cache warm-up effects.
// Each pass re-derives delta, compensating for the hardware clock
// wrapping at CYGNUM_KERNEL_COUNTERS_RTC_PERIOD and clamping negatives.
show_times_detail(fun_times ft[], int nsamples, char *title, bool ignore_first)
    int i, delta, min, max, con_ave, con_min, ave_dev;
    int start_sample, total_samples;
    cyg_int32 total, ave;
    // NOTE(review): the ignore_first branch selecting start_sample and
    // one of the two assignments below is partly elided from this view.
    total_samples = nsamples-1;
    total_samples = nsamples;
    // Pass 1: accumulate total, track min and max.
    for (i = start_sample; i < nsamples; i++) {
        if (ft[i].end < ft[i].start) {
            // Clock wrapped around (timer tick)
            delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
            delta = ft[i].end - ft[i].start;
        if (delta < 0) delta = 0;   // guard against clock anomalies
        if (delta < min) min = delta;
        if (delta > max) max = delta;
    ave = total / total_samples;
    // Pass 2: accumulate |delta - ave| for the average deviation.
    for (i = start_sample; i < nsamples; i++) {
        if (ft[i].end < ft[i].start) {
            // Clock wrapped around (timer tick)
            delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
            delta = ft[i].end - ft[i].start;
        if (delta < 0) delta = 0;
        if (delta < 0) delta = -delta;   // absolute deviation
    ave_dev /= total_samples;
    // Pass 3: count samples within one average deviation of the
    // mean (con_ave) and of the minimum (con_min).
    for (i = start_sample; i < nsamples; i++) {
        if (ft[i].end < ft[i].start) {
            // Clock wrapped around (timer tick)
            delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
            delta = ft[i].end - ft[i].start;
        if (delta < 0) delta = 0;
        if ((delta <= (ave+ave_dev)) && (delta >= (ave-ave_dev))) con_ave++;
        if ((delta <= (min+ave_dev)) && (delta >= (min-ave_dev))) con_min++;
    con_ave = (con_ave * 100) / total_samples;   // to percent
    con_min = (con_min * 100) / total_samples;
    show_ticks_in_us(ave);
    show_ticks_in_us(min);
    show_ticks_in_us(max);
    show_ticks_in_us(ave_dev);
    disable_clock_latency_measurement();   // keep printing out of latency stats
    diag_printf(" %3d%% %3d%%", con_ave, con_min);
    diag_printf(" %s\n", title);
    enable_clock_latency_measurement();
// Print statistics for one sample set; optionally repeat with the first
// sample excluded to expose cache warm-up effects.
show_times(fun_times ft[], int nsamples, char *title)
    show_times_detail(ft, nsamples, title, false);
#ifdef STATS_WITHOUT_FIRST_SAMPLE
    show_times_detail(ft, nsamples, "", true);
406 //--------------------------------------------------------------------------
// Print the iteration counts this run will use, with latency
// measurement paused around the printing.
show_test_parameters(void)
    disable_clock_latency_measurement();
    diag_printf("\nTesting parameters:\n");
    diag_printf(" Clock samples: %5d\n", nsamples);
    diag_printf(" Threads: %5d\n", ntest_threads);
    diag_printf(" Thread switches: %5d\n", nthread_switches);
    diag_printf(" Mutexes: %5d\n", nmutexes);
    diag_printf(" Mailboxes: %5d\n", nmboxes);
    diag_printf(" Semaphores: %5d\n", nsemaphores);
    diag_printf(" Scheduler operations: %5d\n", nscheds);
    diag_printf(" Timers: %5d\n", ntimers);
    enable_clock_latency_measurement();
// Visual separator between test groups.
// NOTE(review): the diag_printf between the two calls is elided from
// this view.
end_of_test_group(void)
    disable_clock_latency_measurement();
    enable_clock_latency_measurement();
433 //--------------------------------------------------------------------------
434 // Compute a name for a thread
// Compute a display name for a thread; stubbed out since thread names
// are not currently used by this test.
thread_name(char *basename, int indx) {
    return "<<NULL>>"; // Not currently used
441 //--------------------------------------------------------------------------
442 // test0 - null test, just return
450 //--------------------------------------------------------------------------
// test3 - loop, yielding repeatedly and checking for cancellation
    pthread_testcancel();   // honour any pending pthread_cancel() request
465 //--------------------------------------------------------------------------
466 // test1 - empty test, simply exit. Last thread signals parent.
// Fragment of test1 (header elided): the last-created thread posts the
// synchro semaphore so the parent knows the whole set has exited.
    if ((cyg_uint32)indx == (cyg_uint32)(ntest_threads-1)) {
        sem_post(&synchro); // Signal that last thread is dying
477 //--------------------------------------------------------------------------
478 // test2 - measure thread switch times
// Fragment of test2 (header elided): two threads ping-pong; thread 0
// timestamps the start and thread 1's side records the end of each
// context switch.
    for (i = 0; i < nthread_switches; i++) {
        if ((int)indx == 0) {
            HAL_CLOCK_READ(&test2_ft[i].start);
            HAL_CLOCK_READ(&test2_ft[i].end);
    if ((int)indx == 1) {
499 //--------------------------------------------------------------------------
500 // Full-circuit mutex unlock/lock test
// Child side of the full-circuit mutex unlock/lock test: repeatedly
// releases and re-acquires the shared mutex while the parent (at higher
// priority) blocks on it and timestamps the round trip.
mutex_test(void * indx)
    pthread_mutex_lock(&test_mutexes[0]);
    for (i = 0; i < nmutexes; i++) {
        wait_for_tick(); // Wait until the next clock tick to minimize aberrations
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_unlock(&test_mutexes[0]);
        pthread_mutex_lock(&test_mutexes[0]);
518 //--------------------------------------------------------------------------
519 // Full-circuit mbox put/get test
// Child side of the full-circuit mbox put/get test: receives items, the
// item value doubles as the sample index, timestamps the arrival, and
// signals the parent; exits when the final item (nmboxes-1) arrives.
mbox_test(cyg_uint32 indx)
        item = cyg_mbox_get(test_mbox_handles[0]);
        HAL_CLOCK_READ(&mbox_ft[(int)item].end);
        cyg_semaphore_post(&synchro);
    } while ((int)item != (nmboxes-1));
535 //--------------------------------------------------------------------------
536 // Full-circuit semaphore post/wait test
// Child side of the full-circuit semaphore post/wait test: waits on the
// shared semaphore and timestamps the wake-up for each sample.
semaphore_test(void * indx)
    for (i = 0; i < nsemaphores; i++) {
        sem_wait(&test_semaphores[0]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
550 //--------------------------------------------------------------------------
552 // This set of tests is used to measure kernel primitives that deal with threads
// Measure POSIX thread primitives: create, yield, set/get priority,
// join, cancel. Each operation is timed HAL_CLOCK_READ-to-
// HAL_CLOCK_READ across ntest_threads iterations, after waiting for a
// fresh clock tick to minimize tick-interrupt disturbance.
// NOTE(review): several lines (declarations of i/attr/policy/retval,
// closing braces, pthread_create trailing arguments) are elided from
// this view.
run_thread_tests(void)
    struct sched_param schedparam;
    // Set my priority higher than any I plan to create
    schedparam.sched_priority = 30;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
    // Initialize thread creation attributes
    pthread_attr_init( &attr );
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );
    // Time thread creation (children are lower priority, so don't run yet).
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < ntest_threads; i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        pthread_create( &threads[i],
        HAL_CLOCK_READ(&thread_ft[i].end);
    show_times(thread_ft, ntest_threads, "Create thread");
    // Time a yield while all other threads are lower priority (no switch).
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < ntest_threads; i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        HAL_CLOCK_READ(&thread_ft[i].end);
    show_times(thread_ft, ntest_threads, "Yield thread [all lower priority]");
    // Time changing another thread's priority.
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < ntest_threads; i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        schedparam.sched_priority = 11;
        pthread_attr_setschedparam( &attr, &schedparam );
        pthread_setschedparam(threads[i], SCHED_RR, &schedparam);
        HAL_CLOCK_READ(&thread_ft[i].end);
    show_times(thread_ft, ntest_threads, "Set priority");
    // Time reading another thread's scheduling parameters.
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < ntest_threads; i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_getschedparam( threads[i], &policy, &schedparam );
        HAL_CLOCK_READ(&thread_ft[i].end);
    show_times(thread_ft, ntest_threads, "Get priority");
    cyg_thread_delay(1);        // Let the test threads run (and exit)
    // Time joining threads that have already exited.
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < ntest_threads; i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_join(threads[i], &retval);
        HAL_CLOCK_READ(&thread_ft[i].end);
    show_times(thread_ft, ntest_threads, "Join exited thread");
    // Time a yield with no other runnable threads.
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < ntest_threads; i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        HAL_CLOCK_READ(&thread_ft[i].end);
    show_times(thread_ft, ntest_threads, "Yield [no other] thread");
    // Recreate the test set
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );
    for (i = 0; i < ntest_threads; i++) {
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        pthread_create( &threads[i],
    cyg_thread_delay(1);        // Let the test threads run
    // Time cancelling running threads.
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < ntest_threads; i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_cancel(threads[i]);
        HAL_CLOCK_READ(&thread_ft[i].end);
    show_times(thread_ft, ntest_threads, "Cancel [running] thread");
    cyg_thread_delay(1);        // Let the test threads do their cancellations
    // Time joining the cancelled threads.
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < ntest_threads; i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_join(threads[i], &retval);
        HAL_CLOCK_READ(&thread_ft[i].end);
    show_times(thread_ft, ntest_threads, "Join [cancelled] thread");
    // Set my priority lower than any I plan to create
    schedparam.sched_priority = 5;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
    // Set up the end-of-threads synchronizer
    sem_init(&synchro, 0, 0);
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );
    // Time creation of higher-priority threads (each runs immediately).
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < ntest_threads; i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        pthread_create( &threads[i],
        HAL_CLOCK_READ(&thread_ft[i].end);
    show_times(thread_ft, ntest_threads, "Create [high priority] thread");
    sem_wait(&synchro);         // Wait for all threads to finish
    // Make sure they are all dead
    for (i = 0; i < ntest_threads; i++) {
        pthread_join(threads[i], &retval);
    run_thread_switch_test();
716 //--------------------------------------------------------------------------
// Measure the thread context-switch time: two equal-priority threads
// (running test2) ping-pong nthread_switches times while timestamps are
// recorded in test2_ft.
// NOTE(review): declarations of i/attr/retval, pthread_create trailing
// arguments and closing braces are elided from this view.
run_thread_switch_test(void)
    struct sched_param schedparam;
    // Set my priority higher than any I plan to create
    schedparam.sched_priority = 30;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
    // Initialize thread creation attributes
    pthread_attr_init( &attr );
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );
    // Set up the end-of-threads synchronizer
    sem_init(&synchro, 0, 0);
    // Set up for thread context switch
    for (i = 0; i < 2; i++) {
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        pthread_create( &threads[i],
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    show_times(test2_ft, nthread_switches, "Thread switch");
    // Reap both switch-test threads.
    for (i = 0; i < 2; i++) {
        pthread_join(threads[i], &retval);
769 //--------------------------------------------------------------------------
// Measure POSIX mutex primitives: init, lock (uncontended), unlock,
// trylock on unlocked and locked mutexes, and destroy. Each operation
// is timed HAL_CLOCK_READ-to-HAL_CLOCK_READ over nmutexes iterations.
// NOTE(review): the declaration of i and loop closing braces are elided
// from this view.
run_mutex_tests(void)
    pthread_mutexattr_t attr;
    pthread_mutexattr_init( &attr );
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmutexes; i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_init(&test_mutexes[i], &attr);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    show_times(mutex_ft, nmutexes, "Init mutex");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmutexes; i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_lock(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    show_times(mutex_ft, nmutexes, "Lock [unlocked] mutex");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmutexes; i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_unlock(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    show_times(mutex_ft, nmutexes, "Unlock [locked] mutex");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmutexes; i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_trylock(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    show_times(mutex_ft, nmutexes, "Trylock [unlocked] mutex");
    // Mutexes are now held (successful trylock above), so this times the
    // failing-trylock path.
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmutexes; i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_trylock(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    show_times(mutex_ft, nmutexes, "Trylock [locked] mutex");
    // Must unlock mutices before destroying them.
    for (i = 0; i < nmutexes; i++) {
        pthread_mutex_unlock(&test_mutexes[i]);
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmutexes; i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_destroy(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    show_times(mutex_ft, nmutexes, "Destroy mutex");
    run_mutex_circuit_test();
840 //--------------------------------------------------------------------------
// Measure a full mutex handoff: a child thread (mutex_test) holds the
// mutex and releases it each iteration; this thread, raised to higher
// priority, blocks on the lock and records the end timestamp, timing
// the complete unlock/wakeup/lock circuit.
// NOTE(review): declarations of i/attr/retval, pthread_create trailing
// arguments and closing braces are elided from this view.
run_mutex_circuit_test(void)
    pthread_mutexattr_t mattr;
    struct sched_param schedparam;
    // Set my priority lower than any I plan to create
    schedparam.sched_priority = 5;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
    // Initialize thread creation attributes
    pthread_attr_init( &attr );
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );
    // Set up for full mutex unlock/lock test
    pthread_mutexattr_init( &mattr );
    pthread_mutex_init(&test_mutexes[0], &mattr);
    sem_init(&synchro, 0, 0);
    pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
    pthread_attr_setstacksize( &attr, STACK_SIZE );
    pthread_create( &mutex_test_thread_handle,
    // Need to raise priority so that this thread will block on the "lock"
    schedparam.sched_priority = 20;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
    for (i = 0; i < nmutexes; i++) {
        pthread_mutex_lock(&test_mutexes[0]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
        pthread_mutex_unlock(&test_mutexes[0]);
    pthread_join(mutex_test_thread_handle, &retval);
    show_times(mutex_ft, nmutexes, "Unlock/Lock mutex");
893 //--------------------------------------------------------------------------
894 // Message queue tests
896 // Currently disabled, pending implementation of POSIX message queues
// Mailbox primitive timings (function header elided from this view —
// presumably run_mbox_tests). Uses the native eCos kernel C mailbox API
// since POSIX message queues are not implemented; each create/peek/put/
// get/tryput/tryget/delete operation is timed over nmboxes iterations.
// NOTE(review): the declarations of i/cnt/item and loop closing braces
// are elided from this view.
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_create(&test_mbox_handles[i], &test_mboxes[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Create mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cnt = cyg_mbox_peek(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Peek [empty] mbox");
#ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
    // Blocking put is only available when the mailbox put-can-wait
    // option is configured.
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_put(test_mbox_handles[i], (void *)i);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Put [first] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cnt = cyg_mbox_peek(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Peek [1 msg] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_put(test_mbox_handles[i], (void *)i);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Put [second] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cnt = cyg_mbox_peek(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Peek [2 msgs] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_get(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Get [first] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_get(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Get [second] mbox");
#endif // ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_tryput(test_mbox_handles[i], (void *)i);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Tryput [first] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_peek_item(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Peek item [non-empty] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_tryget(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Tryget [non-empty] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_peek_item(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Peek item [empty] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_tryget(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Tryget [empty] mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_waiting_to_get(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Waiting to get mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_waiting_to_put(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Waiting to put mbox");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nmboxes; i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_delete(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    show_times(mbox_ft, nmboxes, "Delete mbox");
    run_mbox_circuit_test();
    end_of_test_group();
1039 //--------------------------------------------------------------------------
// Measure a full mailbox round trip: this thread puts an item, the
// higher-priority child (mbox_test) receives it, records the end
// timestamp and posts synchro. Only compiled when blocking mbox put is
// available. Uses native eCos thread/semaphore calls throughout.
// NOTE(review): the declaration of i, pthread/cyg_thread_create middle
// arguments and closing braces are elided from this view.
run_mbox_circuit_test(void)
#ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
    // Set my priority lower than any I plan to create
    cyg_thread_set_priority(cyg_thread_self(), 3);
    // Set up for full mbox put/get test
    cyg_mbox_create(&test_mbox_handles[0], &test_mboxes[0]);
    cyg_semaphore_init(&synchro, 0);
    cyg_thread_create(2,              // Priority - just a number
                      thread_name("thread", 0), // Name
                      &stacks[0][0],  // Stack
                      &mbox_test_thread_handle, // Handle
                      &mbox_test_thread // Thread data structure
    cyg_thread_resume(mbox_test_thread_handle);
    for (i = 0; i < nmboxes; i++) {
        wait_for_tick(); // Wait until the next clock tick to minimize aberrations
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_put(test_mbox_handles[0], (void *)i);
        cyg_semaphore_wait(&synchro);
    cyg_thread_delete(mbox_test_thread_handle);
    show_times(mbox_ft, nmboxes, "Put/Get mbox");
1074 //--------------------------------------------------------------------------
// Measure POSIX semaphore primitives: init, post, wait (non-blocking
// since value is 1), trywait on zero and non-zero semaphores, getvalue,
// and destroy; each timed over nsemaphores iterations.
// NOTE(review): declarations of i/sem_val and loop closing braces are
// elided from this view.
run_semaphore_tests(void)
    // Semaphore primitives
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nsemaphores; i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_init(&test_semaphores[i], 0, 0);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    show_times(semaphore_ft, nsemaphores, "Init semaphore");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nsemaphores; i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_post(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    show_times(semaphore_ft, nsemaphores, "Post [0] semaphore");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nsemaphores; i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_wait(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    show_times(semaphore_ft, nsemaphores, "Wait [1] semaphore");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nsemaphores; i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_trywait(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    show_times(semaphore_ft, nsemaphores, "Trywait [0] semaphore");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nsemaphores; i++) {
        sem_post(&test_semaphores[i]);   // value 1, so trywait succeeds
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_trywait(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    show_times(semaphore_ft, nsemaphores, "Trywait [1] semaphore");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nsemaphores; i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_getvalue(&test_semaphores[i], &sem_val);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    show_times(semaphore_ft, nsemaphores, "Get value of semaphore");
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0; i < nsemaphores; i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_destroy(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    show_times(semaphore_ft, nsemaphores, "Destroy semaphore");
    run_semaphore_circuit_test();
    end_of_test_group();
1145 //--------------------------------------------------------------------------
// Measure the full round-trip cost of a semaphore handoff between two
// threads: this thread posts test_semaphores[0] once per sample; a helper
// thread (semaphore_test_thread_handle) waits on it.  Results are printed
// as "Post/Wait semaphore".  NOTE(review): this view of the file is missing
// some lines (return type, braces, pthread_create argument list), so the
// comments below describe only what is visible.
1148 run_semaphore_circuit_test(void)
1152 struct sched_param schedparam;
1153 pthread_attr_t attr;
// Drop to priority 5 so the helper thread (priority 10) preempts us as
// soon as it becomes runnable.
1156 // Set my priority lower than any I plan to create
1157 schedparam.sched_priority = 5;
1158 pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
1160 // Initialize thread creation attributes
1162 pthread_attr_init( &attr );
1163 pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
1164 pthread_attr_setschedpolicy( &attr, SCHED_RR );
1165 schedparam.sched_priority = 10;
1166 pthread_attr_setschedparam( &attr, &schedparam );
1168 // Set up for full semaphore post/wait test
1169 sem_init(&test_semaphores[0], 0, 0);
1170 sem_init(&synchro, 0, 0);
// The helper thread's stack is carved out of stacks[0]; note that the
// stack address passed is the *top* of the region (&stacks[0][STACK_SIZE]).
1172 pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
1173 pthread_attr_setstacksize( &attr, STACK_SIZE );
1174 pthread_create( &semaphore_test_thread_handle,
// One timed post per sample; the matching .end timestamp is presumably
// recorded by the helper thread after its sem_wait() completes -- the
// helper's body is not visible here, confirm against the full source.
1181 for (i = 0; i < nsemaphores; i++) {
1182 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1183 HAL_CLOCK_READ(&semaphore_ft[i].start);
1184 sem_post(&test_semaphores[0]);
// Wait for the helper to finish before reporting, so all samples are in.
1187 pthread_join(semaphore_test_thread_handle, &retval);
1189 show_times(semaphore_ft, nsemaphores, "Post/Wait semaphore");
1194 //--------------------------------------------------------------------------
1196 // Timer callback function
// Handler for SIGRTMIN (installed with SA_SIGINFO in run_timer_tests).
// It only reports delivery; no timing data is recorded here.
1198 sigrt0(int signo, siginfo_t *info, void *context)
1200 diag_printf("sigrt0 called\n");
1204 // Callback used to test determinism
// Count of latency samples recorded so far.  Shared between the SIGRTMIN+1
// and SIGRTMIN+2 handlers and the measuring threads; volatile because it is
// updated from signal context.
1205 static volatile int timer_cnt;
// Handler for SIGRTMIN+1: records one timer-latency sample per expiry into
// sched_ft[] until nscheds samples have been taken.  .start is set to 0
// because the reference point is the timer expiry itself; .end captures
// the hardware clock at handler entry.
1207 sigrt1(int signo, siginfo_t *info, void *context)
1209 if (timer_cnt == nscheds) return;
1210 sched_ft[timer_cnt].start = 0;
1211 HAL_CLOCK_READ(&sched_ft[timer_cnt++].end);
1212 if (timer_cnt == nscheds) {
// NOTE(review): the body of this last-sample branch (presumably a
// sem_post(&synchro) to wake the measuring thread) is not visible in this
// view -- confirm against the full source.
// Semaphore used to hand each timer expiry over to timer_test2().
1217 static sem_t timer_sem;
// Handler for SIGRTMIN+2: unlike sigrt1 it does not timestamp here.  It
// posts timer_sem so that timer_test2() wakes and takes the .end timestamp,
// making the measured figure include the signal -> thread wakeup path
// ("Timer -> thread post latency").  .start is zeroed as in sigrt1.
// NOTE(review): lines between 1222 and 1227 are missing from this view
// (likely the body of the last-sample branch) -- confirm control flow
// against the full source.
1220 sigrt2(int signo, siginfo_t *info, void *context)
1222 if (timer_cnt == nscheds) {
1224 sem_post(&timer_sem);
1226 sched_ft[timer_cnt].start = 0;
1227 sem_post(&timer_sem);
1231 // Null thread, used to keep scheduler busy
// Busy-loop thread body used as scheduler load in the "[2 threads]" and
// "[many threads]" latency runs.  It calls pthread_testcancel() so that
// pthread_cancel() from run_timer_tests() can terminate it; the enclosing
// loop construct is not visible in this view.
1233 timer_test(void * id)
1237 pthread_testcancel();
1243 // Thread that suspends itself at the first opportunity
// Partner of sigrt2(): records the .end timestamp for the current sample,
// then blocks on timer_sem until the signal handler posts it again.
// NOTE(review): the timestamp is taken at loop entry, i.e. for the sample
// whose post released the *previous* sem_wait() -- confirm the intended
// ordering against the full source.
1245 timer_test2(void *id)
1247 while (timer_cnt != nscheds) {
1248 HAL_CLOCK_READ(&sched_ft[timer_cnt++].end);
1249 sem_wait(&timer_sem);
// Timing of POSIX per-process timers and timer/signal latencies:
//   - per-timer cost of timer_create / timer_settime (arm, disarm) /
//     timer_delete, reported via show_times();
//   - timer signal latency with 0, 2 and ntest_threads competing threads
//     (samples gathered by sigrt1 into sched_ft[]);
//   - timer signal -> thread wakeup latency (via sigrt2 + timer_test2).
// NOTE(review): this view of the file is missing interleaved lines
// (closing braces, pthread_create argument lists, #ifdef partners), so the
// comments below describe only the visible code.
1255 run_timer_tests(void)
1259 struct sigaction sa;
1260 struct sigevent sigev;
1261 struct itimerspec tp;
1263 // Install signal handlers
1264 sigemptyset( &sa.sa_mask );
1265 sa.sa_flags = SA_SIGINFO;
1267 sa.sa_sigaction = sigrt0;
1268 sigaction( SIGRTMIN, &sa, NULL );
1270 sa.sa_sigaction = sigrt1;
1271 sigaction( SIGRTMIN+1, &sa, NULL );
1273 sa.sa_sigaction = sigrt2;
1274 sigaction( SIGRTMIN+2, &sa, NULL );
1276 // Set up common bits of sigevent
1278 sigev.sigev_notify = SIGEV_SIGNAL;
// --- Cost of timer_create() per timer ---
1280 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1281 for (i = 0; i < ntimers; i++) {
1282 HAL_CLOCK_READ(&timer_ft[i].start);
1283 sigev.sigev_signo = SIGRTMIN;
1284 sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1285 res = timer_create( CLOCK_REALTIME, &sigev, &timers[i]);
1286 HAL_CLOCK_READ(&timer_ft[i].end);
1287 CYG_ASSERT( res == 0 , "timer_create() returned error");
1289 show_times(timer_ft, ntimers, "Create timer");
// --- Cost of timer_settime() with an all-zero (disarmed) spec ---
1292 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1293 tp.it_value.tv_sec = 0;
1294 tp.it_value.tv_nsec = 0;
1295 tp.it_interval.tv_sec = 0;
1296 tp.it_interval.tv_nsec = 0;
1297 for (i = 0; i < ntimers; i++) {
1298 HAL_CLOCK_READ(&timer_ft[i].start);
1299 res = timer_settime( timers[i], 0, &tp, NULL );
1300 HAL_CLOCK_READ(&timer_ft[i].end);
1301 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1303 show_times(timer_ft, ntimers, "Initialize timer to zero");
// --- Cost of arming each timer 1.25 s in the future (one-shot) ---
1305 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1306 tp.it_value.tv_sec = 1;
1307 tp.it_value.tv_nsec = 250000000;
1308 tp.it_interval.tv_sec = 0;
1309 tp.it_interval.tv_nsec = 0;
1310 for (i = 0; i < ntimers; i++) {
1311 HAL_CLOCK_READ(&timer_ft[i].start);
1312 res = timer_settime( timers[i], 0, &tp, NULL );
1313 HAL_CLOCK_READ(&timer_ft[i].end);
1314 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1316 show_times(timer_ft, ntimers, "Initialize timer to 1.25 sec");
// --- Cost of disarming the timers again (zero spec) ---
1318 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1319 tp.it_value.tv_sec = 0;
1320 tp.it_value.tv_nsec = 0;
1321 tp.it_interval.tv_sec = 0;
1322 tp.it_interval.tv_nsec = 0;
1323 for (i = 0; i < ntimers; i++) {
1324 HAL_CLOCK_READ(&timer_ft[i].start);
1325 res = timer_settime( timers[i], 0, &tp, NULL );
1326 HAL_CLOCK_READ(&timer_ft[i].end);
1327 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1329 show_times(timer_ft, ntimers, "Disable timer");
// --- Cost of timer_delete() per timer ---
1331 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1332 for (i = 0; i < ntimers; i++) {
1333 HAL_CLOCK_READ(&timer_ft[i].start);
1334 res = timer_delete( timers[i] );
1335 HAL_CLOCK_READ(&timer_ft[i].end);
// NOTE(review): copy/paste defect -- message should read
// "timer_delete() returned error".
1336 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1338 show_times(timer_ft, ntimers, "Delete timer");
// --- Timer latency with no competing threads ---
// One periodic 50 ms timer delivering SIGRTMIN+1; sigrt1 fills sched_ft[].
1342 sigev.sigev_signo = SIGRTMIN+1;
// NOTE(review): i here retains its value from the loop above (== ntimers),
// so &timers[i] points one past the end of the array.  The value is only
// used as an opaque cookie, but &timers[0] was probably intended; same
// pattern recurs below.
1343 sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1344 res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1345 CYG_ASSERT( res == 0 , "timer_create() returned error");
1346 tp.it_value.tv_sec = 0;
1347 tp.it_value.tv_nsec = 50000000;
1348 tp.it_interval.tv_sec = 0;
// (stray double semicolon -- harmless)
1349 tp.it_interval.tv_nsec = 50000000;;
1351 res = timer_settime( timers[0], 0, &tp, NULL );
1352 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1353 sem_init(&synchro, 0, 0);
// Block until sigrt1 has collected all nscheds samples (it presumably
// posts synchro on the last one); retry if interrupted by a signal.
1354 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1356 { res = sem_wait(&synchro);
1357 } while( res == -1 && errno == EINTR );
1358 CYG_ASSERT( res == 0 , "sem_wait() returned error");
1359 tp.it_value.tv_sec = 0;
1360 tp.it_value.tv_nsec = 0;
1361 tp.it_interval.tv_sec = 0;
1362 tp.it_interval.tv_nsec = 0;
1363 res = timer_settime( timers[0], 0, &tp, NULL );
1364 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1365 res = timer_delete( timers[0] );
1366 CYG_ASSERT( res == 0 , "timer_delete() returned error");
1367 show_times(sched_ft, nscheds, "Timer latency [0 threads]");
// --- Timer latency with 2 busy threads (timer_test) in the background ---
1372 struct sched_param schedparam;
1373 pthread_attr_t attr;
// Here we raise our own priority *above* the workers (opposite of the
// semaphore circuit test) so the measuring thread is not starved.
1376 // Set my priority higher than any I plan to create
1377 schedparam.sched_priority = 20;
1378 pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
1381 // Initialize thread creation attributes
1383 pthread_attr_init( &attr );
1384 pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
1385 pthread_attr_setschedpolicy( &attr, SCHED_RR );
1386 schedparam.sched_priority = 10;
1387 pthread_attr_setschedparam( &attr, &schedparam );
1389 for (i = 0; i < 2; i++) {
1390 pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
1391 pthread_attr_setstacksize( &attr, STACK_SIZE );
1392 res = pthread_create( &threads[i],
1397 CYG_ASSERT( res == 0 , "pthread_create() returned error");
1400 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1402 sigev.sigev_signo = SIGRTMIN+1;
// NOTE(review): stale i again (== 2 after the loop above); see note at 1343.
1403 sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1404 res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1405 CYG_ASSERT( res == 0 , "timer_create() returned error");
1406 tp.it_value.tv_sec = 0;
1407 tp.it_value.tv_nsec = 50000000;
1408 tp.it_interval.tv_sec = 0;
1409 tp.it_interval.tv_nsec = 50000000;;
1411 res = timer_settime( timers[0], 0, &tp, NULL );
1412 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1414 sem_init(&synchro, 0, 0);
1416 { res = sem_wait(&synchro);
1417 } while( res == -1 && errno == EINTR );
1418 CYG_ASSERT( res == 0 , "sem_wait() returned error");
1419 res = timer_delete(timers[0]);
// NOTE(review): message typo -- should read "timer_delete() returned error".
1420 CYG_ASSERT( res == 0 , "timerdelete() returned error");
1421 show_times(sched_ft, nscheds, "Timer latency [2 threads]");
// Tear down the two busy threads.
1422 for (i = 0; i < 2; i++) {
1423 pthread_cancel(threads[i]);
1424 pthread_join(threads[i], &retval);
// --- Timer latency with ntest_threads busy threads ---
1429 for (i = 0; i < ntest_threads; i++) {
1430 pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
1431 pthread_attr_setstacksize( &attr, STACK_SIZE );
1432 res = pthread_create( &threads[i],
1437 CYG_ASSERT( res == 0 , "pthread_create() returned error");
1439 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1440 sigev.sigev_signo = SIGRTMIN+1;
// NOTE(review): stale i again (== ntest_threads); see note at 1343.
1441 sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1442 res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1443 CYG_ASSERT( res == 0 , "timer_create() returned error");
1444 tp.it_value.tv_sec = 0;
1445 tp.it_value.tv_nsec = 50000000;
1446 tp.it_interval.tv_sec = 0;
1447 tp.it_interval.tv_nsec = 50000000;;
1449 res = timer_settime( timers[0], 0, &tp, NULL );
1450 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1452 sem_init(&synchro, 0, 0);
1454 { res = sem_wait(&synchro);
1455 } while( res == -1 && errno == EINTR );
1456 CYG_ASSERT( res == 0 , "sem_wait() returned error");
1457 res = timer_delete(timers[0]);
// NOTE(review): same "timerdelete()" message typo as at 1420.
1458 CYG_ASSERT( res == 0 , "timerdelete() returned error");
1459 show_times(sched_ft, nscheds, "Timer latency [many threads]");
1460 for (i = 0; i < ntest_threads; i++) {
1461 pthread_cancel(threads[i]);
1462 pthread_join(threads[i], &retval);
// --- Timer signal -> thread wakeup latency ---
// SIGRTMIN+2 drives sigrt2, which posts timer_sem; timer_test2 (running as
// threads[0]) takes the timestamps.
1465 sem_init(&synchro, 0, 0);
1466 sem_init(&timer_sem, 0, 0);
1467 pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
1468 pthread_attr_setstacksize( &attr, STACK_SIZE );
1469 res = pthread_create( &threads[0],
1474 CYG_ASSERT( res == 0 , "pthread_create() returned error");
1476 wait_for_tick(); // Wait until the next clock tick to minimize aberrations
1477 sigev.sigev_signo = SIGRTMIN+2;
// NOTE(review): here the cookie is the pthread_t value itself rather than a
// timer address as elsewhere -- inconsistent, though the value is unused.
1478 sigev.sigev_value.sival_ptr = (void*)(threads[0]);
1479 res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1480 CYG_ASSERT( res == 0 , "timer_create() returned error");
1481 tp.it_value.tv_sec = 0;
1482 tp.it_value.tv_nsec = 50000000;
1483 tp.it_interval.tv_sec = 0;
1484 tp.it_interval.tv_nsec = 50000000;;
1486 res = timer_settime( timers[0], 0, &tp, NULL );
1487 CYG_ASSERT( res == 0 , "timer_settime() returned error");
1490 { res = sem_wait(&synchro);
1491 } while( res == -1 && errno == EINTR );
1492 CYG_ASSERT( res == 0 , "sem_wait() returned error");
1493 res = timer_delete(timers[0]);
// NOTE(review): same "timerdelete()" message typo as at 1420.
1494 CYG_ASSERT( res == 0 , "timerdelete() returned error");
1495 show_times(sched_ft, nscheds, "Timer -> thread post latency");
// Extra post releases timer_test2 from its final sem_wait() so that
// pthread_join below can complete without cancelling it.
1496 sem_post(&timer_sem);
1497 // pthread_cancel(threads[0]);
1498 pthread_join(threads[0], &retval);
1501 end_of_test_group();
1505 //--------------------------------------------------------------------------
// Top-level measurement driver (the function header line itself is not
// visible in this view of the file; judging by content this is the body of
// run_all_tests).  It calibrates the clock-read overhead, estimates the
// clock-interrupt cost, runs the test groups, then reports scheduler-lock
// and clock/DSR latency statistics and thread stack usage.
1511 cyg_uint32 tv[nsamples], tv0, tv1;
1512 // cyg_uint32 min_stack, max_stack, total_stack, actual_stack, j;
1513 cyg_tick_count_t ticks, tick0, tick1;
1514 #ifdef CYG_SCHEDULER_LOCK_TIMINGS
1515 cyg_uint32 lock_ave, lock_max;
1517 #if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
1518 cyg_int32 clock_ave;
1521 disable_clock_latency_measurement();
1523 // cyg_test_dump_thread_stack_stats( "Startup, main stack", thread[0] );
1524 cyg_test_dump_interrupt_stack_stats( "Startup" );
1525 cyg_test_dump_idlethread_stack_stats( "Startup" );
1526 cyg_test_clear_interrupt_stack();
1528 diag_printf("\neCos Kernel Timings\n");
1529 diag_printf("Notes: all times are in microseconds (.000001) unless otherwise stated\n");
1530 #ifdef STATS_WITHOUT_FIRST_SAMPLE
1531 diag_printf(" second line of results have first sample removed\n");
1534 cyg_thread_delay(2); // Make sure the clock is actually running
1536 ns_per_system_clock = 1000000/rtc_resolution[1];
// Calibrate: back-to-back clock reads give the per-read overhead, which is
// subtracted from every later measurement.  NOTE(review): tv0 is presumably
// zeroed on a line not visible in this view -- confirm.
1538 for (i = 0; i < nsamples; i++) {
1539 HAL_CLOCK_READ(&tv[i]);
1542 for (i = 1; i < nsamples; i++) {
1543 tv0 += tv[i] - tv[i-1];
1545 end_of_test_group();
1547 overhead = tv0 / (nsamples-1);
1548 diag_printf("Reading the hardware clock takes %d 'ticks' overhead\n", overhead);
1549 diag_printf("... this value will be factored out of all other measurements\n");
// Estimate clock-interrupt cost: spin until the tick counter advances,
// then read the hardware clock just after the tick boundary.
1551 // Try and measure how long the clock interrupt handling takes
1552 for (i = 0; i < nsamples; i++) {
1553 tick0 = cyg_current_time();
1555 tick1 = cyg_current_time();
1556 if (tick0 != tick1) break;
1558 HAL_CLOCK_READ(&tv[i]);
// Average the samples (scaled x1000 for sub-tick precision) and remove the
// calibrated read overhead.  NOTE(review): tv1's zero-initialization is on
// a line not visible here -- confirm.
1561 for (i = 0; i < nsamples; i++) {
1562 tv1 += tv[i] * 1000;
1564 tv1 = tv1 / nsamples;
1565 tv1 -= overhead; // Adjust out the cost of getting the timer value
1566 diag_printf("Clock interrupt took");
1567 show_ticks_in_us(tv1);
1568 diag_printf(" microseconds (%d raw clock ticks)\n", tv1/1000);
1569 enable_clock_latency_measurement();
1571 ticks = cyg_current_time();
1573 show_test_parameters();
1576 reset_clock_latency_measurement();
// Run the test groups (mbox tests are disabled; other groups may be
// invoked on lines not visible in this view).
1580 // run_mbox_tests();
1581 run_semaphore_tests();
1584 #ifdef CYG_SCHEDULER_LOCK_TIMINGS
1585 Cyg_Scheduler::get_lock_times(&lock_ave, &lock_max);
1586 diag_printf("\nMax lock:");
1587 show_ticks_in_us(lock_max);
1588 diag_printf(", Ave lock:");
1589 show_ticks_in_us(lock_ave);
1593 #if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
1594 // Display latency figures in same format as all other numbers
1595 disable_clock_latency_measurement();
1596 clock_ave = (total_clock_latency*1000) / total_clock_interrupts;
1597 show_ticks_in_us(clock_ave);
1598 show_ticks_in_us(min_clock_latency*1000);
1599 show_ticks_in_us(max_clock_latency*1000);
1600 show_ticks_in_us(0);
1601 diag_printf(" Clock/interrupt latency\n\n");
1602 enable_clock_latency_measurement();
1605 #if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
1606 disable_clock_latency_measurement();
1607 clock_ave = (total_clock_dsr_latency*1000) / total_clock_dsr_calls;
1608 show_ticks_in_us(clock_ave);
1609 show_ticks_in_us(min_clock_dsr_latency*1000);
1610 show_ticks_in_us(max_clock_dsr_latency*1000);
1611 show_ticks_in_us(0);
1612 diag_printf(" Clock DSR latency\n\n");
1613 enable_clock_latency_measurement();
// Stack usage: scan each thread stack from the bottom for the first
// non-zero byte (stacks are assumed pre-zeroed) to find the high-water
// mark.  NOTE(review): max_stack/total_stack initialization is on lines
// not visible here -- confirm.
1617 disable_clock_latency_measurement();
1618 min_stack = STACK_SIZE;
1621 for (i = 0; i < (int)NTEST_THREADS; i++) {
1622 for (j = 0; j < STACK_SIZE; j++) {
1623 if (stacks[i][j]) break;
1625 actual_stack = STACK_SIZE-j;
1626 if (actual_stack < min_stack) min_stack = actual_stack;
1627 if (actual_stack > max_stack) max_stack = actual_stack;
1628 total_stack += actual_stack;
// NOTE(review): this block uses STACKSIZE and stack[0] (the main stack)
// versus STACK_SIZE and stacks[] above -- two distinct macros/arrays;
// verify both are defined as intended in the full source.
1630 for (j = 0; j < STACKSIZE; j++) {
1631 if (((char *)stack[0])[j]) break;
1633 diag_printf("%5d %5d %5d (main stack: %5d) Thread stack used (%d total)\n",
1634 total_stack/NTEST_THREADS, min_stack, max_stack,
1635 STACKSIZE - j, STACK_SIZE);
1638 // cyg_test_dump_thread_stack_stats( "All done, main stack", thread[0] );
1639 cyg_test_dump_interrupt_stack_stats( "All done" );
1640 cyg_test_dump_idlethread_stack_stats( "All done" );
1642 enable_clock_latency_measurement();
// Final wall-clock report in milliseconds, derived from system ticks.
1644 ticks = cyg_current_time();
1645 diag_printf("\nTiming complete - %d ms total\n\n", (int)((ticks*ns_per_system_clock)/1000));
1647 CYG_TEST_PASS_FINISH("Basic timing OK");
// Test entry point: choose the sample/object counts for the run.  Under a
// simulator the *_SIM values keep run time manageable; on real hardware the
// full-size constants are used.  The tail of main (which presumably starts
// the test) is not visible in this view.
1650 int main( int argc, char **argv )
1654 if (cyg_test_is_simulator) {
1655 nsamples = NSAMPLES_SIM;
1656 ntest_threads = NTEST_THREADS_SIM;
1657 nthread_switches = NTHREAD_SWITCHES_SIM;
1658 nmutexes = NMUTEXES_SIM;
1659 nmboxes = NMBOXES_SIM;
1660 nsemaphores = NSEMAPHORES_SIM;
1661 nscheds = NSCHEDS_SIM;
1662 ntimers = NTIMERS_SIM;
// Hardware (non-simulator) parameter set.  NOTE(review): assignments for
// nmboxes/nscheds/ntimers in this branch are on lines not visible here.
1664 nsamples = NSAMPLES;
1665 ntest_threads = NTEST_THREADS;
1666 nthread_switches = NTHREAD_SWITCHES;
1667 nmutexes = NMUTEXES;
1669 nsemaphores = NSEMAPHORES;
// WORKHORSE_TEST inflates the object counts for a stress run; the #else
// branch (lines 1683+) applies smaller floors instead.
1675 #ifdef WORKHORSE_TEST
1676 ntest_threads = max(512, ntest_threads);
1677 nmutexes = max(1024, nmutexes);
1678 nsemaphores = max(1024, nsemaphores);
1679 nmboxes = max(1024, nmboxes);
1680 ncounters = max(1024, ncounters);
1681 ntimers = max(1024, ntimers);
1683 ntest_threads = max(64, ntest_threads);
1684 nmutexes = max(32, nmutexes);
1685 nsemaphores = max(32, nsemaphores);
1686 nmboxes = max(32, nmboxes);
1687 ntimers = max(32, ntimers);
1694 #endif // CYGFUN_KERNEL_API_C, etc.