//==========================================================================
//
//      sched/sched.cxx
//
//      Scheduler class implementations
//
//==========================================================================
//####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with eCos; if not, write to the Free Software Foundation, Inc.,
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
//
// As a special exception, if other files instantiate templates or use macros
// or inline functions from this file, or you compile this file and link it
// with other works to produce a work based on this file, this file does not
// by itself cause the resulting work to be covered by the GNU General Public
// License. However the source code for this file must still be made available
// in accordance with section (3) of the GNU General Public License.
//
// This exception does not invalidate any other reasons why a work based on
// this file might be covered by the GNU General Public License.
//
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
// at http://sources.redhat.com/ecos/ecos-license/
// -------------------------------------------
//####ECOSGPLCOPYRIGHTEND####
//==========================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s):   nickg
// Contributors:        nickg
// Date:        1997-09-15
// Purpose:     Scheduler class implementation
// Description: This file contains the definitions of the scheduler class
//              member functions that are common to all scheduler
//              implementations.
//
//####DESCRIPTIONEND####
//
//==========================================================================

#include <pkgconf/kernel.h>

#include <cyg/kernel/ktypes.h>         // base kernel types
#include <cyg/infra/cyg_trac.h>        // tracing macros
#include <cyg/infra/cyg_ass.h>         // assertion macros
#include <cyg/kernel/instrmnt.h>       // instrumentation

#include <cyg/kernel/sched.hxx>        // our header

#include <cyg/kernel/thread.hxx>       // thread classes
#include <cyg/kernel/intr.hxx>         // Interrupt interface

#include <cyg/hal/hal_arch.h>          // Architecture specific definitions

#include <cyg/kernel/thread.inl>       // thread inlines
#include <cyg/kernel/sched.inl>        // scheduler inlines

//-------------------------------------------------------------------------
// Some local tracing control - a default.
#ifdef CYGDBG_USE_TRACING
# if !defined( CYGDBG_INFRA_DEBUG_TRACE_ASSERT_SIMPLE ) && \
     !defined( CYGDBG_INFRA_DEBUG_TRACE_ASSERT_FANCY  )
   // i.e. not a tracing implementation that takes a long time to output

#  ifndef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
#   define CYGDBG_KERNEL_TRACE_UNLOCK_INNER
#  endif // control not already defined

# endif  // trace implementation not ..._SIMPLE && not ..._FANCY
#endif   // CYGDBG_USE_TRACING

// -------------------------------------------------------------------------
// Static Cyg_Scheduler class members

// We start with sched_lock at 1 so that any kernel code we
// call during initialization will not try to reschedule.

CYGIMP_KERNEL_SCHED_LOCK_DEFINITIONS;

Cyg_Thread              *volatile Cyg_Scheduler_Base::current_thread[CYGNUM_KERNEL_CPU_MAX];

volatile cyg_bool       Cyg_Scheduler_Base::need_reschedule[CYGNUM_KERNEL_CPU_MAX];

Cyg_Scheduler           Cyg_Scheduler::scheduler CYG_INIT_PRIORITY( SCHEDULER );

volatile cyg_ucount32   Cyg_Scheduler_Base::thread_switches[CYGNUM_KERNEL_CPU_MAX];

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

CYG_BYTE cyg_sched_cpu_interrupt[CYGNUM_KERNEL_CPU_MAX][sizeof(Cyg_Interrupt)]
                                 CYGBLD_ANNOTATE_VARIABLE_SCHED;

__externC cyg_ISR cyg_hal_cpu_message_isr;
__externC cyg_DSR cyg_hal_cpu_message_dsr;

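// A trivial placement operator new: it constructs an object in
// caller-supplied storage rather than allocating. start_cpu() below uses
// it to build each per-CPU Cyg_Interrupt object inside the statically
// allocated cyg_sched_cpu_interrupt[] buffers, avoiding any dynamic
// allocation this early in system startup.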
inline void *operator new(size_t size, void *ptr) { return ptr; }

#endif

// -------------------------------------------------------------------------
// Scheduler unlock function.

// This is only called when there is the potential for real work to be
// done. Other cases are handled in Cyg_Scheduler::unlock(), which is
// an inline; this function may also be called from
// Cyg_Scheduler::reschedule() or Cyg_Scheduler::unlock_reschedule(). The
// new_lock argument contains the value that the scheduler lock should
// have after this function has completed. If it is zero then the lock is
// being released and some extra work (running ASRs, checking for DSRs) is
// done before returning. If it is non-zero then it must equal the
// current value of the lock, and indicates that we want to reacquire
// the scheduler lock before returning. This latter option only makes
// sense if the current thread is no longer runnable, e.g. sleeping;
// otherwise this function will do nothing.
// Passing in the final lock value this way effectively makes the
// scheduler lock a form of per-thread variable. Each call to
// unlock_inner() carries with it the value the lock should have when
// the scheduler reschedules this thread and this function returns.
// When that value is non-zero, no ASRs are run and no DSRs are
// processed on the way out. This makes it possible for threads that
// want to go to sleep to wake up with the scheduler lock in the
// same state it was in before.
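//
// For illustration only (not part of the kernel source), the two calling
// patterns look roughly like this; Cyg_Scheduler::unlock() and
// Cyg_Scheduler::unlock_reschedule() are the real inline wrappers (see
// sched.inl), while the surrounding context is hypothetical:
//
//     // Plain unlock: the lock drops to zero and DSRs/ASRs may run.
//     Cyg_Scheduler::unlock();               // ends up in unlock_inner(0)
//
//     // A thread that has just marked itself sleeping gives up the CPU
//     // but resumes with the lock restored to its current value:
//     Cyg_Scheduler::unlock_reschedule();    // unlock_inner(get_sched_lock())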

void Cyg_Scheduler::unlock_inner( cyg_ucount32 new_lock )
{
#ifdef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
    CYG_REPORT_FUNCTION();
#endif

    do {

        CYG_PRECONDITION( new_lock==0 ? get_sched_lock() == 1 :
                          ((get_sched_lock() == new_lock) || (get_sched_lock() == new_lock+1)),
                          "sched_lock not at expected value" );

#ifdef CYGIMP_KERNEL_INTERRUPTS_DSRS

        // Call any pending DSRs. Do this here to ensure that any
        // threads that get awakened are properly scheduled.

        if( new_lock == 0 && Cyg_Interrupt::DSRs_pending() )
            Cyg_Interrupt::call_pending_DSRs();
#endif

        Cyg_Thread *current = get_current_thread();

        CYG_ASSERTCLASS( current, "Bad current thread" );

#ifdef CYGFUN_KERNEL_ALL_THREADS_STACK_CHECKING
        // This walk relies on CYGVAR_KERNEL_THREADS_LIST being available.
        current = Cyg_Thread::get_list_head();
        while ( current ) {
            current->check_stack();
            current = current->get_list_next();
        }
        current = get_current_thread();
#endif

#ifdef CYGFUN_KERNEL_THREADS_STACK_CHECKING
        current->check_stack();
#endif

        // If the current thread is going to sleep, or someone
        // wants a reschedule, choose another thread to run

        if( current->state != Cyg_Thread::RUNNING || get_need_reschedule() ) {

            CYG_INSTRUMENT_SCHED(RESCHEDULE,0,0);

            // Get the next thread to run from scheduler
            Cyg_Thread *next = scheduler.schedule();

            CYG_CHECK_DATA_PTR( next, "Invalid next thread pointer");
            CYG_ASSERTCLASS( next, "Bad next thread" );

            if( current != next )
            {

                CYG_INSTRUMENT_THREAD(SWITCH,current,next);

                // Count this thread switch
                thread_switches[CYG_KERNEL_CPU_THIS()]++;

#ifdef CYGFUN_KERNEL_THREADS_STACK_CHECKING
                next->check_stack(); // before running it
#endif
                current->timeslice_save();

                // Switch contexts
                HAL_THREAD_SWITCH_CONTEXT( &current->stack_ptr,
                                           &next->stack_ptr );
                // Worry here about possible compiler
                // optimizations across the above call that may try to
                // propagate common subexpressions.  We would end up
                // with the expression from one thread in its
                // successor. This is only a worry if we do not save
                // and restore the complete register set. We need a
                // way of marking functions that return into a
                // different context. A temporary fix would be to
                // disable CSE (-fdisable-cse) in the compiler.

                // We return here only when the current thread is
                // rescheduled.  There is a bit of housekeeping to do
                // here before we are allowed to go on our way.

                CYG_CHECK_DATA_PTR( current, "Invalid current thread pointer");
                CYG_ASSERTCLASS( current, "Bad current thread" );

                current_thread[CYG_KERNEL_CPU_THIS()] = current;   // restore current thread pointer

                current->timeslice_restore();
            }

            clear_need_reschedule();    // finished rescheduling
        }

        if( new_lock == 0 )
        {

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

            // Check whether the ASR is pending and not inhibited.  If
            // we can call it, then transfer this info to a local
            // variable (call_asr) and clear the pending flag.  Note
            // that we only do this if the scheduler lock is about to
            // be zeroed. In any other circumstance we are not
            // unlocking.

            cyg_bool call_asr = false;

            if( (current->asr_inhibit == 0) && current->asr_pending )
            {
                call_asr = true;
                current->asr_pending = false;
            }
#endif

            HAL_REORDER_BARRIER(); // Make sure everything above has happened
                                   // by this point
            zero_sched_lock();     // Clear the lock
            HAL_REORDER_BARRIER();

#ifdef CYGIMP_KERNEL_INTERRUPTS_DSRS

            // Now check whether any DSRs got posted during the thread
            // switch and if so, go around again. Making this test after
            // the lock has been zeroed avoids a race condition in which
            // a DSR could have been posted during a reschedule, but would
            // not be run until the _next_ time we release the sched lock.

            if( Cyg_Interrupt::DSRs_pending() ) {
                inc_sched_lock();   // reclaim the lock
                continue;           // go back to head of loop
            }

#endif
            // Otherwise the lock is zero, we can return.

//            CYG_POSTCONDITION( get_sched_lock() == 0, "sched_lock not zero" );

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT
            // If the test made above, while the scheduler was still
            // locked, indicated that the ASR should be called, call it
            // here. Calling the ASR must be the very last thing we do,
            // since it must run as close to "user" state as possible.

            if( call_asr ) current->asr(current->asr_data);
#endif

        }
        else
        {
            // If new_lock is non-zero then we restore the sched_lock to
            // the value given.

            HAL_REORDER_BARRIER();

            set_sched_lock(new_lock);

            HAL_REORDER_BARRIER();
        }

#ifdef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
        CYG_REPORT_RETURN();
#endif
        return;

    } while( 1 );

    CYG_FAIL( "Should not be executed" );
}

// -------------------------------------------------------------------------
// Thread startup. This is called from Cyg_Thread::thread_entry() and
// performs some housekeeping for a newly started thread.

void Cyg_Scheduler::thread_entry( Cyg_Thread *thread )
{
    clear_need_reschedule();            // finished rescheduling
    set_current_thread(thread);         // restore current thread pointer

    CYG_INSTRUMENT_THREAD(ENTER,thread,0);

    thread->timeslice_reset();
    thread->timeslice_restore();

    // Finally unlock the scheduler. As well as clearing the scheduler
    // lock this allows any pending DSRs to execute. The new thread
    // must start with a lock of zero, so we keep unlocking until the
    // lock reaches zero.
    while( get_sched_lock() != 0 )
        unlock();
}

// -------------------------------------------------------------------------
// Start the scheduler. This is called after the initial threads have been
// created to start scheduling. It gets any other CPUs running, and then
// enters the scheduler.

void Cyg_Scheduler::start()
{
    CYG_REPORT_FUNCTION();

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    HAL_SMP_CPU_TYPE cpu;

    for( cpu = 0; cpu < CYG_KERNEL_CPU_COUNT(); cpu++ )
    {
        // Don't start this CPU, it is running already!
        if( cpu == CYG_KERNEL_CPU_THIS() )
            continue;

        CYG_KERNEL_CPU_START( cpu );
    }

#endif

    start_cpu();
}

// -------------------------------------------------------------------------
// Start scheduling on this CPU. This is called on each CPU in the system
// when it is started.

void Cyg_Scheduler::start_cpu()
{
    CYG_REPORT_FUNCTION();

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    // Set up the inter-CPU interrupt for this CPU

    Cyg_Interrupt * intr = new( (void *)&cyg_sched_cpu_interrupt[HAL_SMP_CPU_THIS()] )
        Cyg_Interrupt( CYGNUM_HAL_SMP_CPU_INTERRUPT_VECTOR( HAL_SMP_CPU_THIS() ),
                       0,
                       0,
                       cyg_hal_cpu_message_isr,
                       cyg_hal_cpu_message_dsr
                     );

    intr->set_cpu( intr->get_vector(), HAL_SMP_CPU_THIS() );

    intr->attach();

    intr->unmask_interrupt( intr->get_vector() );

#endif

    // Get the first thread to run from scheduler
    register Cyg_Thread *next = scheduler.schedule();

    CYG_ASSERTCLASS( next, "Bad initial thread" );

    clear_need_reschedule();            // finished rescheduling
    set_current_thread(next);           // restore current thread pointer

#ifdef CYGVAR_KERNEL_COUNTERS_CLOCK
    // Reference the real time clock. This ensures that at least one
    // reference to the kernel_clock.o object exists, without which
    // the object will not be included while linking.
    CYG_REFERENCE_OBJECT( Cyg_Clock::real_time_clock );
#endif

    // Load the first thread. This will also enable interrupts since
    // the initial state of all threads is to have interrupts enabled.

    HAL_THREAD_LOAD_CONTEXT( &next->stack_ptr );

}

// -------------------------------------------------------------------------
// SMP support functions

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

// This is called on each secondary CPU on its interrupt stack after
// the initial CPU has initialized the world.

externC void cyg_kernel_smp_startup()
{
    CYG_INSTRUMENT_SMP( CPU_START, CYG_KERNEL_CPU_THIS(), 0 );
    Cyg_Scheduler::lock();
    Cyg_Scheduler::start_cpu();
}

// This is called from the DSR of the inter-CPU interrupt to cause a
// reschedule when the scheduler lock is zeroed.

__externC void cyg_scheduler_set_need_reschedule()
{
    CYG_INSTRUMENT_SMP( RESCHED_RECV, 0, 0 );
    Cyg_Scheduler::need_reschedule[HAL_SMP_CPU_THIS()] = true;
}

#endif

// -------------------------------------------------------------------------
// Consistency checker

#ifdef CYGDBG_USE_ASSERTS

cyg_bool Cyg_Scheduler::check_this( cyg_assert_class_zeal zeal) const
{
    CYG_REPORT_FUNCTION();

    // check that we have a non-NULL pointer first
    if( this == NULL ) return false;

    switch( zeal )
    {
    case cyg_system_test:
    case cyg_extreme:
    case cyg_thorough:
        if( !get_current_thread()->check_this(zeal) ) return false;
    case cyg_quick:
    case cyg_trivial:
    case cyg_none:
    default:
        break;
    };

    return true;
}

#endif

//==========================================================================
// SchedThread members

// -------------------------------------------------------------------------
// Static data members

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

# ifdef CYGSEM_KERNEL_SCHED_ASR_GLOBAL
Cyg_ASR *Cyg_SchedThread::asr = &Cyg_SchedThread::asr_default;
# endif

# ifdef CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL
CYG_ADDRWORD Cyg_SchedThread::asr_data = 0;
# endif

#endif // CYGSEM_KERNEL_SCHED_ASR_SUPPORT

// -------------------------------------------------------------------------
// Constructor

Cyg_SchedThread::Cyg_SchedThread(Cyg_Thread *thread, CYG_ADDRWORD sched_info)
: Cyg_SchedThread_Implementation(sched_info)
{
    CYG_REPORT_FUNCTION();

    queue = NULL;

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL

    mutex_count = 0;

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE

    priority_inherited = false;

#endif
#endif

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

    asr_inhibit = 0;
    asr_pending = false;

#ifndef CYGSEM_KERNEL_SCHED_ASR_GLOBAL
    asr = asr_default;
#endif
#ifndef CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL
    asr_data = 0;
#endif

#endif
}

// -------------------------------------------------------------------------
// ASR support functions

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

// -------------------------------------------------------------------------
// Set ASR
// Install a new ASR, returning the old one.

void Cyg_SchedThread::set_asr( Cyg_ASR  *new_asr, CYG_ADDRWORD  new_data,
                  Cyg_ASR **old_asr, CYG_ADDRWORD *old_data)
{
    CYG_REPORT_FUNCTION();

    // Do this with the scheduler locked...
    Cyg_Scheduler::lock();

    if( old_asr != NULL ) *old_asr = asr;
    if( old_data != NULL ) *old_data = asr_data;

    // If new_asr is NULL, do not change the ASR,
    // but only change the data.
    if( new_asr != NULL ) asr = new_asr;
    asr_data = new_data;

    Cyg_Scheduler::unlock();
}
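
// A minimal usage sketch (illustrative only; the handler, state variable
// and thread pointer are hypothetical). A client installs its ASR, saving
// the previous handler/data pair so it can be restored later:
//
//     static void my_asr( CYG_ADDRWORD data );     // hypothetical handler
//
//     Cyg_ASR      *old_asr;
//     CYG_ADDRWORD  old_data;
//     thread->set_asr( my_asr, (CYG_ADDRWORD)&my_state, &old_asr, &old_data );
//     ...
//     thread->set_asr( old_asr, old_data, NULL, NULL );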

// -------------------------------------------------------------------------
// Clear ASR

void Cyg_SchedThread::clear_asr()
{
    CYG_REPORT_FUNCTION();

    // Do this with the scheduler locked...
    Cyg_Scheduler::lock();

    // Reset ASR to default.
    asr = asr_default;
    asr_data = 0;

    Cyg_Scheduler::unlock();
}

// -------------------------------------------------------------------------
// Default ASR function.
// Having this avoids ever having to worry about seeing a NULL
// pointer as the ASR function.

void Cyg_SchedThread::asr_default(CYG_ADDRWORD data)
{
    CYG_REPORT_FUNCTION();

    data = data;
    return;
}

#endif

// -------------------------------------------------------------------------
// Generic priority protocol support

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL

void Cyg_SchedThread::set_inherited_priority( cyg_priority pri, Cyg_Thread *thread )
{
    CYG_REPORT_FUNCTION();

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE

    // This is the common code for the priority inheritance and ceiling
    // protocols. This implementation provides a simplified version of
    // the protocol.

    Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
                                         Cyg_SchedThread,
                                         this);

    CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");

    // Compare with *current* priority in case thread has already
    // inherited - for relay case below.
    if( pri < priority )
    {
        cyg_priority mypri = priority;
        cyg_bool already_inherited = priority_inherited;

        // If this is first inheritance, copy the old pri
        // and set inherited flag. We clear it before setting the
        // pri since set_priority() is inheritance aware.
        // This is called with the sched locked, so no race conditions.

        priority_inherited = false;     // so that set_prio DTRT

        self->set_priority( pri );

        if( !already_inherited )
            original_priority = mypri;

        priority_inherited = true;      // regardless, because it is now

    }

#endif
}
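
// Worked example (hypothetical priorities; in eCos a numerically lower
// value means a higher priority): thread L runs at priority 10 and holds
// a mutex; thread H at priority 4 blocks on that mutex. The mutex code
// relays H's priority to L, which lands here with pri == 4. Since 4 < 10,
// L records original_priority = 10, sets priority_inherited, and runs at
// priority 4 until clear_inherited_priority() restores it.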

void Cyg_SchedThread::relay_inherited_priority( Cyg_Thread *ex_owner, Cyg_ThreadQueue *pqueue)
{
    CYG_REPORT_FUNCTION();

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE

    // A simple implementation of priority inheritance.
    // At its simplest, this member does nothing.

    // If anyone else is waiting, then the *new* owner inherits from
    // the current one, since that priority is a maximum of those of
    // the waiters. (It is worth skipping this when nobody is waiting,
    // to prevent unnecessary priority skew.) This could be viewed as
    // a discovered priority ceiling.

    if ( !pqueue->empty() )
        set_inherited_priority( ex_owner->get_current_priority(), ex_owner );

#endif
}

void Cyg_SchedThread::clear_inherited_priority()
{
    CYG_REPORT_FUNCTION();

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE

    // A simple implementation of priority inheritance/ceiling
    // protocols.  The simplification in this algorithm is that we do
    // not reduce our priority until we have freed all mutexes
    // claimed. Hence we can continue to run at an artificially high
    // priority even when we should not.  However, since nested
    // mutexes are rare, the thread we have inherited from is likely
    // to be locking the same mutexes we are, and mutex claim periods
    // should be very short, the performance difference between this
    // and a more complex algorithm should be negligible. The most
    // important advantage of this algorithm is that it is fast and
    // deterministic.

    Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
                                         Cyg_SchedThread,
                                         this);

    CYG_ASSERT( mutex_count >= 0, "Negative mutex count");

    if( mutex_count == 0 && priority_inherited )
    {
        priority_inherited = false;

        // Only make an effort if the priority must change
        if( priority < original_priority )
            self->set_priority( original_priority );

    }

#endif
}
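
// Illustration of the simplification above (hypothetical scenario): a
// thread holding mutexes M1 and M2 that inherited a high priority through
// M1 keeps that priority after releasing M1; only when M2 is also released
// (mutex_count reaches 0) does it drop back to original_priority. A more
// complex algorithm would recompute the inherited priority on each release.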

#endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL

// -------------------------------------------------------------------------
// Priority inheritance support.

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_INHERIT

// -------------------------------------------------------------------------
// Inherit the priority of the provided thread if it
// has a higher priority than ours.

void Cyg_SchedThread::inherit_priority( Cyg_Thread *thread)
{
    CYG_REPORT_FUNCTION();

    Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
                                         Cyg_SchedThread,
                                         this);

    CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");
    CYG_ASSERT( self != thread, "Trying to inherit from self!");

    self->set_inherited_priority( thread->get_current_priority(), thread );

}

// -------------------------------------------------------------------------
// Inherit the priority of the ex-owner thread or from the queue if it
// has a higher priority than ours.

void Cyg_SchedThread::relay_priority( Cyg_Thread *ex_owner, Cyg_ThreadQueue *pqueue)
{
    CYG_REPORT_FUNCTION();

    relay_inherited_priority( ex_owner, pqueue );
}

// -------------------------------------------------------------------------
// Lose a priority inheritance

void Cyg_SchedThread::disinherit_priority()
{
    CYG_REPORT_FUNCTION();

    CYG_ASSERT( mutex_count >= 0, "Negative mutex count");

    clear_inherited_priority();
}

#endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_INHERIT

// -------------------------------------------------------------------------
// Priority ceiling support

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_CEILING

void Cyg_SchedThread::set_priority_ceiling( cyg_priority pri )
{
    CYG_REPORT_FUNCTION();

    CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");

    set_inherited_priority( pri );

}

void Cyg_SchedThread::clear_priority_ceiling()
{
    CYG_REPORT_FUNCTION();

    CYG_ASSERT( mutex_count >= 0, "Negative mutex count");

    clear_inherited_priority();
}

#endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_CEILING

// -------------------------------------------------------------------------
// EOF sched/sched.cxx