1 //==========================================================================
5 // Scheduler class implementations
7 //==========================================================================
8 //####ECOSGPLCOPYRIGHTBEGIN####
9 // -------------------------------------------
10 // This file is part of eCos, the Embedded Configurable Operating System.
11 // Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
13 // eCos is free software; you can redistribute it and/or modify it under
14 // the terms of the GNU General Public License as published by the Free
15 // Software Foundation; either version 2 or (at your option) any later version.
17 // eCos is distributed in the hope that it will be useful, but WITHOUT ANY
18 // WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 // You should have received a copy of the GNU General Public License along
23 // with eCos; if not, write to the Free Software Foundation, Inc.,
24 // 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26 // As a special exception, if other files instantiate templates or use macros
27 // or inline functions from this file, or you compile this file and link it
28 // with other works to produce a work based on this file, this file does not
29 // by itself cause the resulting work to be covered by the GNU General Public
30 // License. However the source code for this file must still be made available
31 // in accordance with section (3) of the GNU General Public License.
33 // This exception does not invalidate any other reasons why a work based on
34 // this file might be covered by the GNU General Public License.
36 // Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
37 // at http://sources.redhat.com/ecos/ecos-license/
38 // -------------------------------------------
39 //####ECOSGPLCOPYRIGHTEND####
40 //==========================================================================
41 //#####DESCRIPTIONBEGIN####
44 // Contributors: nickg
46 // Purpose: Scheduler class implementation
47 // Description: This file contains the definitions of the scheduler class
48 // member functions that are common to all scheduler
51 //####DESCRIPTIONEND####
53 //==========================================================================
55 #include <pkgconf/kernel.h>
57 #include <cyg/kernel/ktypes.h> // base kernel types
58 #include <cyg/infra/cyg_trac.h> // tracing macros
59 #include <cyg/infra/cyg_ass.h> // assertion macros
60 #include <cyg/kernel/instrmnt.h> // instrumentation
62 #include <cyg/kernel/sched.hxx> // our header
64 #include <cyg/kernel/thread.hxx> // thread classes
65 #include <cyg/kernel/intr.hxx> // Interrupt interface
67 #include <cyg/hal/hal_arch.h> // Architecture specific definitions
69 #include <cyg/kernel/thread.inl> // thread inlines
70 #include <cyg/kernel/sched.inl> // scheduler inlines
//--------------------------------------------------------------------------
// Local tracing control. By default, enable tracing of unlock_inner()
// only when the configured trace implementation is cheap enough not to
// distort the timing of the scheduler itself.
#ifdef CYGDBG_USE_TRACING
# if !defined( CYGDBG_INFRA_DEBUG_TRACE_ASSERT_SIMPLE ) && \
     !defined( CYGDBG_INFRA_DEBUG_TRACE_ASSERT_FANCY )
// Neither the "simple" nor the "fancy" implementation is selected, so
// the active tracing mechanism does not take a long time to output.
#  if !defined( CYGDBG_KERNEL_TRACE_UNLOCK_INNER )
#   define CYGDBG_KERNEL_TRACE_UNLOCK_INNER
#  endif // control not already defined
# endif  // not ..._SIMPLE && not ..._FANCY
#endif   // CYGDBG_USE_TRACING
86 // -------------------------------------------------------------------------
87 // Static Cyg_Scheduler class members
89 // We start with sched_lock at 1 so that any kernel code we
90 // call during initialization will not try to reschedule.
92 CYGIMP_KERNEL_SCHED_LOCK_DEFINITIONS;
94 Cyg_Thread *volatile Cyg_Scheduler_Base::current_thread[CYGNUM_KERNEL_CPU_MAX];
96 volatile cyg_bool Cyg_Scheduler_Base::need_reschedule[CYGNUM_KERNEL_CPU_MAX];
98 Cyg_Scheduler Cyg_Scheduler::scheduler CYG_INIT_PRIORITY( SCHEDULER );
100 volatile cyg_ucount32 Cyg_Scheduler_Base::thread_switches[CYGNUM_KERNEL_CPU_MAX];
102 #ifdef CYGPKG_KERNEL_SMP_SUPPORT
104 CYG_BYTE cyg_sched_cpu_interrupt[CYGNUM_KERNEL_CPU_MAX][sizeof(Cyg_Interrupt)]
105 CYGBLD_ANNOTATE_VARIABLE_SCHED;
107 __externC cyg_ISR cyg_hal_cpu_message_isr;
108 __externC cyg_DSR cyg_hal_cpu_message_dsr;
110 inline void *operator new(size_t size, void *ptr) { return ptr; };
114 // -------------------------------------------------------------------------
115 // Scheduler unlock function.
117 // This is only called when there is the potential for real work to be
118 // done. Other cases are handled in Cyg_Scheduler::unlock() which is
119 // an inline; _or_ this function may have been called from
120 // Cyg_Scheduler::reschedule(), or Cyg_Scheduler::unlock_reschedule. The
121 // new_lock argument contains the value that the scheduler lock should
122 // have after this function has completed. If it is zero then the lock is
123 // being released and some extra work (running ASRs, checking for DSRs) is
124 // done before returning. If it is non-zero then it must equal the
125 // current value of the lock, and is used to indicate that we want to
126 // reacquire the scheduler lock before returning. This latter option
127 // only makes any sense if the current thread is no longer runnable,
128 // e.g. sleeping, otherwise this function will do nothing.
129 // This approach of passing in the lock value at the end effectively
130 // makes the scheduler lock a form of per-thread variable. Each call
131 // to unlock_inner() carries with it the value the scheduler should
132 // have when it reschedules this thread back, and leaves this function.
133 // When it is non-zero, and the thread is rescheduled, no ASRs are run,
134 // or DSRs processed. By doing this, it makes it possible for threads
135 // that want to go to sleep to wake up with the scheduler lock in the
136 // same state it was in before.
void Cyg_Scheduler::unlock_inner( cyg_ucount32 new_lock )
// NOTE(review): the opening brace of this function and the enclosing
// retry loop (the branch target of the "continue" far below) are missing
// from this copy of the file -- lines were dropped in extraction. The
// surviving code lines are left untouched below; only comments added.
#ifdef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
    CYG_REPORT_FUNCTION();
    // NOTE(review): the matching #endif for the #ifdef above is missing
    // from this copy.

    // On entry: if we are releasing the lock completely (new_lock == 0)
    // it must currently be exactly 1; otherwise it must already be at
    // (or one above) the value we are asked to restore.
    CYG_PRECONDITION( new_lock==0 ? get_sched_lock() == 1 :
                      ((get_sched_lock() == new_lock) || (get_sched_lock() == new_lock+1)),
                      "sched_lock not at expected value" );

#ifdef CYGIMP_KERNEL_INTERRUPTS_DSRS

    // Call any pending DSRs. Do this here to ensure that any
    // threads that get awakened are properly scheduled.

    if( new_lock == 0 && Cyg_Interrupt::DSRs_pending() )
        Cyg_Interrupt::call_pending_DSRs();
    // NOTE(review): the #endif for CYGIMP_KERNEL_INTERRUPTS_DSRS is
    // missing here in this copy.

    Cyg_Thread *current = get_current_thread();

    CYG_ASSERTCLASS( current, "Bad current thread" );

#ifdef CYGFUN_KERNEL_ALL_THREADS_STACK_CHECKING
    // should have CYGVAR_KERNEL_THREADS_LIST
    // Walk the global thread list, stack-checking each thread, then put
    // 'current' back. NOTE(review): the loop header/terminator for this
    // walk and the matching #endif are missing from this copy; only the
    // body statements remain.
    current = Cyg_Thread::get_list_head();
    current->check_stack();
    current = current->get_list_next();
    current = get_current_thread();

#ifdef CYGFUN_KERNEL_THREADS_STACK_CHECKING
    // Cheaper variant: check only the current thread's stack.
    current->check_stack();
    // NOTE(review): matching #endif missing in this copy.

    // If the current thread is going to sleep, or someone
    // wants a reschedule, choose another thread to run

    if( current->state != Cyg_Thread::RUNNING || get_need_reschedule() ) {

        CYG_INSTRUMENT_SCHED(RESCHEDULE,0,0);

        // Get the next thread to run from scheduler
        Cyg_Thread *next = scheduler.schedule();

        CYG_CHECK_DATA_PTR( next, "Invalid next thread pointer");
        CYG_ASSERTCLASS( next, "Bad next thread" );

        // Only switch context if the scheduler chose a different thread.
        // NOTE(review): the opening brace of this if-body is missing in
        // this copy.
        if( current != next )

            CYG_INSTRUMENT_THREAD(SWITCH,current,next);

            // Count this thread switch
            thread_switches[CYG_KERNEL_CPU_THIS()]++;

#ifdef CYGFUN_KERNEL_THREADS_STACK_CHECKING
            next->check_stack(); // before running it
            // NOTE(review): matching #endif missing in this copy.

            // Switch to the new thread's saved context.
            // NOTE(review): "¤t" is a mis-encoded "&current" (the
            // HTML entity &curren; plus a trailing 't'), and the second
            // argument (presumably &next->stack_ptr) together with the
            // closing parenthesis are missing from this copy.
            HAL_THREAD_SWITCH_CONTEXT( ¤t->stack_ptr,

            // Worry here about possible compiler
            // optimizations across the above call that may try to
            // propagate common subexpressions. We would end up
            // with the expression from one thread in its
            // successor. This is only a worry if we do not save
            // and restore the complete register set. We need a
            // way of marking functions that return into a
            // different context. A temporary fix would be to
            // disable CSE (-fdisable-cse) in the compiler.

            // We return here only when the current thread is
            // rescheduled. There is a bit of housekeeping to do
            // here before we are allowed to go on our way.

            CYG_CHECK_DATA_PTR( current, "Invalid current thread pointer");
            CYG_ASSERTCLASS( current, "Bad current thread" );

            current_thread[CYG_KERNEL_CPU_THIS()] = current; // restore current thread pointer

#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
        // Reset the timeslice counter so that this thread gets a full
        // quantum.
        reset_timeslice_count();
        // NOTE(review): matching #endif missing in this copy.

        clear_need_reschedule();    // finished rescheduling
        // NOTE(review): the closing braces of the reschedule block and
        // the "if( new_lock == 0 )" guard that should precede the ASR
        // handling below appear to be missing from this copy.

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

        // Check whether the ASR is pending and not inhibited. If
        // we can call it, then transfer this info to a local
        // variable (call_asr) and clear the pending flag. Note
        // that we only do this if the scheduler lock is about to
        // be zeroed. In any other circumstance we are not
        // releasing the lock, so the ASR must stay pending.

        cyg_bool call_asr = false;

        if( (current->asr_inhibit == 0) && current->asr_pending )
            // NOTE(review): the braces of this if-body and the
            // "call_asr = true;" statement are missing from this copy;
            // as the text stands call_asr would never become true.
            current->asr_pending = false;
        // NOTE(review): matching #endif for ASR_SUPPORT missing here.

        HAL_REORDER_BARRIER(); // Make sure everything above has happened

        // Release the scheduler lock: from this point on DSRs may run
        // and preemption is possible again.
        zero_sched_lock();     // Clear the lock
        HAL_REORDER_BARRIER();

#ifdef CYGIMP_KERNEL_INTERRUPTS_DSRS

        // Now check whether any DSRs got posted during the thread
        // switch and if so, go around again. Making this test after
        // the lock has been zeroed avoids a race condition in which
        // a DSR could have been posted during a reschedule, but would
        // not be run until the _next_ time we release the sched lock.

        if( Cyg_Interrupt::DSRs_pending() ) {
            inc_sched_lock();   // reclaim the lock
            continue;           // go back to head of loop
        // NOTE(review): the closing brace of this if and the matching
        // #endif are missing in this copy; the "continue" targets the
        // outer retry loop that is also missing from this copy.

        // Otherwise the lock is zero, we can return.

        // CYG_POSTCONDITION( get_sched_lock() == 0, "sched_lock not zero" );

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT
        // If the test within the sched_lock indicating that the ASR
        // be called was true, call it here. Calling the ASR must be
        // the very last thing we do here, since it must run as close
        // to "user" state as possible.

        if( call_asr ) current->asr(current->asr_data);
        // NOTE(review): the matching #endif and the return path for the
        // new_lock == 0 case are missing from this copy.

        // If new_lock is non-zero then we restore the sched_lock to
        // the value requested by the caller before returning.

        HAL_REORDER_BARRIER();

        set_sched_lock(new_lock);

        HAL_REORDER_BARRIER();

#ifdef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
        // NOTE(review): trace-return code, matching #endif, return
        // statement and closing braces are missing from this copy.

        // Control can never reach this point.
        CYG_FAIL( "Should not be executed" );
311 // -------------------------------------------------------------------------
312 // Start the scheduler. This is called after the initial threads have been
313 // created to start scheduling. It gets any other CPUs running, and then
314 // enters the scheduler.
316 void Cyg_Scheduler::start()
318 CYG_REPORT_FUNCTION();
320 #ifdef CYGPKG_KERNEL_SMP_SUPPORT
322 HAL_SMP_CPU_TYPE cpu;
324 for( cpu = 0; cpu < CYG_KERNEL_CPU_COUNT(); cpu++ )
326 // Don't start this CPU, it is running already!
327 if( cpu == CYG_KERNEL_CPU_THIS() )
330 CYG_KERNEL_CPU_START( cpu );
338 // -------------------------------------------------------------------------
339 // Start scheduling on this CPU. This is called on each CPU in the system
340 // when it is started.
void Cyg_Scheduler::start_cpu()
// NOTE(review): opening brace missing in this copy of the file.
    CYG_REPORT_FUNCTION();

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    // Set up the inter-CPU interrupt for this CPU

    // Construct the Cyg_Interrupt object in place inside the statically
    // allocated per-CPU buffer cyg_sched_cpu_interrupt, using the
    // placement operator new defined earlier in this file.
    // NOTE(review): the remaining constructor arguments, the closing
    // ");" of this constructor call and (presumably) the intr->attach()
    // call are missing from this copy.
    Cyg_Interrupt * intr = new( (void *)&cyg_sched_cpu_interrupt[HAL_SMP_CPU_THIS()] )
        Cyg_Interrupt( CYGNUM_HAL_SMP_CPU_INTERRUPT_VECTOR( HAL_SMP_CPU_THIS() ),
                       cyg_hal_cpu_message_isr,
                       cyg_hal_cpu_message_dsr

    // Route the inter-CPU vector to this CPU and unmask it so that
    // cross-CPU reschedule messages can be delivered.
    intr->set_cpu( intr->get_vector(), HAL_SMP_CPU_THIS() );

    intr->unmask_interrupt( intr->get_vector() );
    // NOTE(review): the #endif for CYGPKG_KERNEL_SMP_SUPPORT is missing
    // here in this copy.

    // Get the first thread to run from scheduler
    register Cyg_Thread *next = scheduler.schedule();

    CYG_ASSERTCLASS( next, "Bad initial thread" );

    clear_need_reschedule();    // finished rescheduling
    set_current_thread(next);   // restore current thread pointer

#ifdef CYGVAR_KERNEL_COUNTERS_CLOCK
    // Reference the real time clock. This ensures that at least one
    // reference to the kernel_clock.o object exists, without which
    // the object will not be included while linking.
    CYG_REFERENCE_OBJECT( Cyg_Clock::real_time_clock );
    // NOTE(review): matching #endif missing in this copy.

    // Load the first thread. This will also enable interrupts since
    // the initial state of all threads is to have interrupts enabled.

    // Hand the CPU to the first thread; this call does not return.
    HAL_THREAD_LOAD_CONTEXT( &next->stack_ptr );
388 // -------------------------------------------------------------------------
389 // SMP support functions
391 #ifdef CYGPKG_KERNEL_SMP_SUPPORT
393 // This is called on each secondary CPU on its interrupt stack after
394 // the initial CPU has initialized the world.
396 externC void cyg_kernel_smp_startup()
398 CYG_INSTRUMENT_SMP( CPU_START, CYG_KERNEL_CPU_THIS(), 0 );
399 Cyg_Scheduler::lock();
400 Cyg_Scheduler::start_cpu();
403 // This is called from the DSR of the inter-CPU interrupt to cause a
404 // reschedule when the scheduler lock is zeroed.
406 __externC void cyg_scheduler_set_need_reschedule()
408 CYG_INSTRUMENT_SMP( RESCHED_RECV, 0, 0 );
409 Cyg_Scheduler::need_reschedule[HAL_SMP_CPU_THIS()] = true;
414 // -------------------------------------------------------------------------
415 // Consistency checker
417 #ifdef CYGDBG_USE_ASSERTS
cyg_bool Cyg_Scheduler::check_this( cyg_assert_class_zeal zeal) const
// NOTE(review): opening brace missing in this copy of the file.
    CYG_REPORT_FUNCTION();

    // check that we have a non-NULL pointer first
    if( this == NULL ) return false;

    // NOTE(review): the switch( zeal ) statement these case labels
    // belong to, its other cases and the final "return true;" are
    // missing from this copy; only this fragment remains.
    case cyg_system_test:
        // At the most thorough check level, validate the current thread
        // object as well.
        if( !get_current_thread()->check_this(zeal) ) return false;
444 //==========================================================================
445 // SchedThread members
447 // -------------------------------------------------------------------------
448 // Static data members
#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

// When the ASR function is configured global there is a single ASR
// pointer shared by all threads, initialized to the default no-op ASR.
# ifdef CYGSEM_KERNEL_SCHED_ASR_GLOBAL
Cyg_ASR *Cyg_SchedThread::asr = &Cyg_SchedThread::asr_default;
// NOTE(review): this # endif was missing from this copy; restored.
# endif // CYGSEM_KERNEL_SCHED_ASR_GLOBAL

// Likewise a single global ASR data word when so configured.
# ifdef CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL
CYG_ADDRWORD Cyg_SchedThread::asr_data = 0;
// NOTE(review): this # endif was missing from this copy; restored.
# endif // CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL

#endif // CYGSEM_KERNEL_SCHED_ASR_SUPPORT
462 // -------------------------------------------------------------------------
// Construct the scheduler-specific part of a thread, passing the
// scheduler-implementation info down to the base class.
// NOTE(review): the opening brace, several member initializations
// (presumably the wait-queue pointer, mutex count and ASR inhibit/
// pending state -- TODO confirm against the pristine sources) and the
// matching #endif directives are missing from this copy of the file.
// 'thread' is not referenced in the lines that remain.
Cyg_SchedThread::Cyg_SchedThread(Cyg_Thread *thread, CYG_ADDRWORD sched_info)
: Cyg_SchedThread_Implementation(sched_info)
    CYG_REPORT_FUNCTION();

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE

    // Start out with no priority inheritance in effect.
    priority_inherited = false;

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

#ifndef CYGSEM_KERNEL_SCHED_ASR_GLOBAL

#ifdef CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL
498 // -------------------------------------------------------------------------
499 // ASR support functions
501 #ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT
503 // -------------------------------------------------------------------------
505 // Install a new ASR, returning the old one.
507 void Cyg_SchedThread::set_asr( Cyg_ASR *new_asr, CYG_ADDRWORD new_data,
508 Cyg_ASR **old_asr, CYG_ADDRWORD *old_data)
510 CYG_REPORT_FUNCTION();
512 // Do this with the scheduler locked...
513 Cyg_Scheduler::lock();
515 if( old_asr != NULL ) *old_asr = asr;
516 if( old_data != NULL ) *old_data = asr_data;
518 // If new_asr is NULL, do not change the ASR,
519 // but only change the data.
520 if( new_asr != NULL ) asr = new_asr;
523 Cyg_Scheduler::unlock();
526 // -------------------------------------------------------------------------
529 void Cyg_SchedThread::clear_asr()
531 CYG_REPORT_FUNCTION();
533 // Do this with the scheduler locked...
534 Cyg_Scheduler::lock();
536 // Reset ASR to default.
540 Cyg_Scheduler::unlock();
543 // -------------------------------------------------------------------------
544 // Default ASR function.
545 // having this avoids our having to worry about ever seeing a NULL
546 // pointer as the ASR function.
548 void Cyg_SchedThread::asr_default(CYG_ADDRWORD data)
550 CYG_REPORT_FUNCTION();
558 // -------------------------------------------------------------------------
559 // Generic priority protocol support
561 #ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL
563 void Cyg_SchedThread::set_inherited_priority( cyg_priority pri, Cyg_Thread *thread )
565 CYG_REPORT_FUNCTION();
567 #ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE
569 // This is the comon code for priority inheritance and ceiling
570 // protocols. This implementation provides a simplified version of
573 Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
577 CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");
579 // Compare with *current* priority in case thread has already
580 // inherited - for relay case below.
583 cyg_priority mypri = priority;
584 cyg_bool already_inherited = priority_inherited;
586 // If this is first inheritance, copy the old pri
587 // and set inherited flag. We clear it before setting the
588 // pri since set_priority() is inheritance aware.
589 // This is called with the sched locked, so no race conditions.
591 priority_inherited = false; // so that set_prio DTRT
593 self->set_priority( pri );
595 if( !already_inherited )
596 original_priority = mypri;
598 priority_inherited = true; // regardless, because it is now
605 void Cyg_SchedThread::relay_inherited_priority( Cyg_Thread *ex_owner, Cyg_ThreadQueue *pqueue)
607 CYG_REPORT_FUNCTION();
609 #ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE
611 // A simple implementation of priority inheritance.
612 // At its simplest, this member does nothing.
614 // If there is anyone else waiting, then the *new* owner inherits from
615 // the current one, since that is a maxima of the others waiting.
616 // (It's worth not doing if there's nobody waiting to prevent
617 // unneccessary priority skew.) This could be viewed as a discovered
620 if ( !pqueue->empty() )
621 set_inherited_priority( ex_owner->get_current_priority(), ex_owner );
626 void Cyg_SchedThread::clear_inherited_priority()
628 CYG_REPORT_FUNCTION();
630 #ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE
632 // A simple implementation of priority inheritance/ceiling
633 // protocols. The simplification in this algorithm is that we do
634 // not reduce our priority until we have freed all mutexes
635 // claimed. Hence we can continue to run at an artificially high
636 // priority even when we should not. However, since nested
637 // mutexes are rare, the thread we have inherited from is likely
638 // to be locking the same mutexes we are, and mutex claim periods
639 // should be very short, the performance difference between this
640 // and a more complex algorithm should be negligible. The most
641 // important advantage of this algorithm is that it is fast and
644 Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
648 CYG_ASSERT( mutex_count >= 0, "Non-positive mutex count");
650 if( mutex_count == 0 && priority_inherited )
652 priority_inherited = false;
654 // Only make an effort if the priority must change
655 if( priority < original_priority )
656 self->set_priority( original_priority );
663 #endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL
665 // -------------------------------------------------------------------------
666 // Priority inheritance support.
668 #ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_INHERIT
670 // -------------------------------------------------------------------------
671 // Inherit the priority of the provided thread if it
672 // has a higher priority than ours.
674 void Cyg_SchedThread::inherit_priority( Cyg_Thread *thread)
676 CYG_REPORT_FUNCTION();
678 Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
682 CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");
683 CYG_ASSERT( self != thread, "Trying to inherit from self!");
685 self->set_inherited_priority( thread->get_current_priority(), thread );
689 // -------------------------------------------------------------------------
690 // Inherit the priority of the ex-owner thread or from the queue if it
691 // has a higher priority than ours.
693 void Cyg_SchedThread::relay_priority( Cyg_Thread *ex_owner, Cyg_ThreadQueue *pqueue)
695 CYG_REPORT_FUNCTION();
697 relay_inherited_priority( ex_owner, pqueue );
700 // -------------------------------------------------------------------------
701 // Lose a priority inheritance
703 void Cyg_SchedThread::disinherit_priority()
705 CYG_REPORT_FUNCTION();
707 CYG_ASSERT( mutex_count >= 0, "Non-positive mutex count");
709 clear_inherited_priority();
712 #endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_INHERIT
714 // -------------------------------------------------------------------------
715 // Priority ceiling support
717 #ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_CEILING
719 void Cyg_SchedThread::set_priority_ceiling( cyg_priority pri )
721 CYG_REPORT_FUNCTION();
723 CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");
725 set_inherited_priority( pri );
729 void Cyg_SchedThread::clear_priority_ceiling( )
731 CYG_REPORT_FUNCTION();
733 CYG_ASSERT( mutex_count >= 0, "Non-positive mutex count");
735 clear_inherited_priority();
738 #endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_CEILING
740 // -------------------------------------------------------------------------
741 // EOF sched/sched.cxx