//==========================================================================
//
//      sched/sched.cxx
//
//      Scheduler class implementations
//
//==========================================================================
//####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with eCos; if not, write to the Free Software Foundation, Inc.,
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
//
// As a special exception, if other files instantiate templates or use macros
// or inline functions from this file, or you compile this file and link it
// with other works to produce a work based on this file, this file does not
// by itself cause the resulting work to be covered by the GNU General Public
// License. However the source code for this file must still be made available
// in accordance with section (3) of the GNU General Public License.
//
// This exception does not invalidate any other reasons why a work based on
// this file might be covered by the GNU General Public License.
//
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
// at http://sources.redhat.com/ecos/ecos-license/
// -------------------------------------------
//####ECOSGPLCOPYRIGHTEND####
//==========================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s):   nickg
// Contributors:        nickg
// Date:        1997-09-15
// Purpose:     Scheduler class implementation
// Description: This file contains the definitions of the scheduler class
//              member functions that are common to all scheduler
//              implementations.
//
//####DESCRIPTIONEND####
//
//==========================================================================

#include <pkgconf/kernel.h>

#include <cyg/kernel/ktypes.h>         // base kernel types
#include <cyg/infra/cyg_trac.h>        // tracing macros
#include <cyg/infra/cyg_ass.h>         // assertion macros
#include <cyg/kernel/instrmnt.h>       // instrumentation

#include <cyg/kernel/sched.hxx>        // our header

#include <cyg/kernel/thread.hxx>       // thread classes
#include <cyg/kernel/intr.hxx>         // Interrupt interface

#include <cyg/hal/hal_arch.h>          // Architecture specific definitions

#include <cyg/kernel/thread.inl>       // thread inlines
#include <cyg/kernel/sched.inl>        // scheduler inlines
//-------------------------------------------------------------------------
// Some local tracing control - a default.
#ifdef CYGDBG_USE_TRACING
# if !defined( CYGDBG_INFRA_DEBUG_TRACE_ASSERT_SIMPLE ) && \
     !defined( CYGDBG_INFRA_DEBUG_TRACE_ASSERT_FANCY  )
   // i.e. not a tracing implementation that takes a long time to output

#  ifndef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
#   define CYGDBG_KERNEL_TRACE_UNLOCK_INNER
#  endif // control not already defined

# endif  // trace implementation not ..._SIMPLE && not ..._FANCY
#endif   // CYGDBG_USE_TRACING

// -------------------------------------------------------------------------
// Static Cyg_Scheduler class members

// We start with sched_lock at 1 so that any kernel code we
// call during initialization will not try to reschedule.

CYGIMP_KERNEL_SCHED_LOCK_DEFINITIONS;

Cyg_Thread              *volatile Cyg_Scheduler_Base::current_thread[CYGNUM_KERNEL_CPU_MAX];

volatile cyg_bool       Cyg_Scheduler_Base::need_reschedule[CYGNUM_KERNEL_CPU_MAX];

Cyg_Scheduler           Cyg_Scheduler::scheduler CYG_INIT_PRIORITY( SCHEDULER );

volatile cyg_ucount32   Cyg_Scheduler_Base::thread_switches[CYGNUM_KERNEL_CPU_MAX];

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

CYG_BYTE cyg_sched_cpu_interrupt[CYGNUM_KERNEL_CPU_MAX][sizeof(Cyg_Interrupt)]
                                 CYGBLD_ANNOTATE_VARIABLE_SCHED;

__externC cyg_ISR cyg_hal_cpu_message_isr;
__externC cyg_DSR cyg_hal_cpu_message_dsr;

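// Placement operator new, used in start_cpu() below to construct the
// per-CPU inter-CPU interrupt objects in the statically allocated
// cyg_sched_cpu_interrupt[] buffers without touching the heap.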
inline void *operator new(size_t size, void *ptr) { return ptr; }

#endif

// -------------------------------------------------------------------------
// Scheduler unlock function.

// This is only called when there is the potential for real work to be
// done. Other cases are handled in Cyg_Scheduler::unlock() which is
// an inline; _or_ this function may have been called from
// Cyg_Scheduler::reschedule(), or Cyg_Scheduler::unlock_reschedule. The
// new_lock argument contains the value that the scheduler lock should
// have after this function has completed. If it is zero then the lock is
// being released and some extra work (running ASRs, checking for DSRs) is
// done before returning. If it is non-zero then it must equal the
// current value of the lock, and is used to indicate that we want to
// reacquire the scheduler lock before returning. This latter option
// only makes sense if the current thread is no longer runnable,
// e.g. sleeping, otherwise this function will do nothing.
// Passing in the lock value to restore effectively makes the scheduler
// lock a form of per-thread variable: each call to unlock_inner()
// carries with it the value the scheduler lock should have when this
// thread is rescheduled and leaves this function. When it is non-zero,
// and the thread is rescheduled, no ASRs are run and no DSRs are
// processed. This makes it possible for threads that want to go to
// sleep to wake up with the scheduler lock in the same state it was
// in before.
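
// For illustration only (not from the original source): a hedged
// sketch of the calling convention described above, using the
// hypothetical names wait_queue and set_state. A blocking primitive
// runs with the lock claimed and asks for it back on wakeup:
//
//     Cyg_Scheduler::lock();
//     current->set_state( Cyg_Thread::SLEEPING );  // no longer RUNNING
//     wait_queue.enqueue( current );
//     // Switch away; on wakeup the lock is restored unchanged and no
//     // ASRs or DSRs run here (in practice this point is reached via
//     // Cyg_Scheduler::reschedule() or unlock_reschedule()):
//     Cyg_Scheduler::unlock_inner( Cyg_Scheduler::get_sched_lock() );
//
// A plain final unlock instead arrives here as unlock_inner(0) and
// gets the ASR/DSR processing described above.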

void Cyg_Scheduler::unlock_inner( cyg_ucount32 new_lock )
{
#ifdef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
    CYG_REPORT_FUNCTION();
#endif

    do {

        CYG_PRECONDITION( new_lock==0 ? get_sched_lock() == 1 :
                          ((get_sched_lock() == new_lock) || (get_sched_lock() == new_lock+1)),
                          "sched_lock not at expected value" );

#ifdef CYGIMP_KERNEL_INTERRUPTS_DSRS

        // Call any pending DSRs. Do this here to ensure that any
        // threads that get awakened are properly scheduled.

        if( new_lock == 0 && Cyg_Interrupt::DSRs_pending() )
            Cyg_Interrupt::call_pending_DSRs();
#endif

        Cyg_Thread *current = get_current_thread();

        CYG_ASSERTCLASS( current, "Bad current thread" );

#ifdef CYGFUN_KERNEL_ALL_THREADS_STACK_CHECKING
        // this option requires CYGVAR_KERNEL_THREADS_LIST
        current = Cyg_Thread::get_list_head();
        while ( current ) {
            current->check_stack();
            current = current->get_list_next();
        }
        current = get_current_thread();
#endif

#ifdef CYGFUN_KERNEL_THREADS_STACK_CHECKING
        current->check_stack();
#endif

        // If the current thread is going to sleep, or someone
        // wants a reschedule, choose another thread to run

        if( current->state != Cyg_Thread::RUNNING || get_need_reschedule() ) {

            CYG_INSTRUMENT_SCHED(RESCHEDULE,0,0);

            // Get the next thread to run from scheduler
            Cyg_Thread *next = scheduler.schedule();

            CYG_CHECK_DATA_PTR( next, "Invalid next thread pointer");
            CYG_ASSERTCLASS( next, "Bad next thread" );

            if( current != next )
            {

                CYG_INSTRUMENT_THREAD(SWITCH,current,next);

                // Count this thread switch
                thread_switches[CYG_KERNEL_CPU_THIS()]++;

#ifdef CYGFUN_KERNEL_THREADS_STACK_CHECKING
                next->check_stack(); // before running it
#endif

                // Switch contexts
                HAL_THREAD_SWITCH_CONTEXT( &current->stack_ptr,
                                           &next->stack_ptr );

                // Worry here about possible compiler
                // optimizations across the above call that may try to
                // propagate common subexpressions.  We would end up
                // with the expression from one thread in its
                // successor. This is only a worry if we do not save
                // and restore the complete register set. We need a
                // way of marking functions that return into a
                // different context. A temporary fix would be to
                // disable CSE (-fdisable-cse) in the compiler.

                // We return here only when the current thread is
                // rescheduled.  There is a bit of housekeeping to do
                // here before we are allowed to go on our way.

                CYG_CHECK_DATA_PTR( current, "Invalid current thread pointer");
                CYG_ASSERTCLASS( current, "Bad current thread" );

                current_thread[CYG_KERNEL_CPU_THIS()] = current;   // restore current thread pointer
            }

#ifdef CYGSEM_KERNEL_SCHED_TIMESLICE
            // Reset the timeslice counter so that this thread gets a full
            // quantum.
            reset_timeslice_count();
#endif

            clear_need_reschedule();    // finished rescheduling
        }

        if( new_lock == 0 )
        {

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

            // Check whether the ASR is pending and not inhibited.  If
            // we can call it, then transfer this info to a local
            // variable (call_asr) and clear the pending flag.  Note
            // that we only do this if the scheduler lock is about to
            // be zeroed. In any other circumstance we are not
            // unlocking.

            cyg_bool call_asr = false;

            if( (current->asr_inhibit == 0) && current->asr_pending )
            {
                call_asr = true;
                current->asr_pending = false;
            }
#endif

            HAL_REORDER_BARRIER(); // Make sure everything above has happened
                                   // by this point
            zero_sched_lock();     // Clear the lock
            HAL_REORDER_BARRIER();

#ifdef CYGIMP_KERNEL_INTERRUPTS_DSRS

            // Now check whether any DSRs got posted during the thread
            // switch and if so, go around again. Making this test after
            // the lock has been zeroed avoids a race condition in which
            // a DSR could have been posted during a reschedule, but would
            // not be run until the _next_ time we release the sched lock.

            if( Cyg_Interrupt::DSRs_pending() ) {
                inc_sched_lock();   // reclaim the lock
                continue;           // go back to head of loop
            }

#endif
            // Otherwise the lock is zero, we can return.

//            CYG_POSTCONDITION( get_sched_lock() == 0, "sched_lock not zero" );

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT
            // If the test within the sched_lock indicating that the ASR
            // should be called was true, call it here. Calling the ASR
            // must be the very last thing we do here, since it must run
            // as close to "user" state as possible.

            if( call_asr ) current->asr(current->asr_data);
#endif

        }
        else
        {
            // If new_lock is non-zero then we restore the sched_lock to
            // the value given.

            HAL_REORDER_BARRIER();

            set_sched_lock(new_lock);

            HAL_REORDER_BARRIER();
        }

#ifdef CYGDBG_KERNEL_TRACE_UNLOCK_INNER
        CYG_REPORT_RETURN();
#endif
        return;

    } while( 1 );

    CYG_FAIL( "Should not be executed" );
}
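
// For illustration only (not from the original source): the fast path
// lives in the inline Cyg_Scheduler::unlock() in sched.inl, as noted
// above. Its relationship to unlock_inner() is roughly:
//
//     void Cyg_Scheduler::unlock()              // hedged sketch
//     {
//         if( get_sched_lock() == 1 )
//             unlock_inner( 0 );    // real release work, see above
//         else
//             set_sched_lock( get_sched_lock() - 1 );
//     }
//
// which matches the precondition at the top of unlock_inner() that
// the lock is still 1 when new_lock is 0.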

// -------------------------------------------------------------------------
// Start the scheduler. This is called after the initial threads have been
// created to start scheduling. It gets any other CPUs running, and then
// enters the scheduler.

void Cyg_Scheduler::start()
{
    CYG_REPORT_FUNCTION();

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    HAL_SMP_CPU_TYPE cpu;

    for( cpu = 0; cpu < CYG_KERNEL_CPU_COUNT(); cpu++ )
    {
        // Don't start this CPU, it is running already!
        if( cpu == CYG_KERNEL_CPU_THIS() )
            continue;

        CYG_KERNEL_CPU_START( cpu );
    }

#endif

    start_cpu();
}

// -------------------------------------------------------------------------
// Start scheduling on this CPU. This is called on each CPU in the system
// when it is started.

void Cyg_Scheduler::start_cpu()
{
    CYG_REPORT_FUNCTION();

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

    // Set up the inter-CPU interrupt for this CPU

    Cyg_Interrupt * intr = new( (void *)&cyg_sched_cpu_interrupt[HAL_SMP_CPU_THIS()] )
        Cyg_Interrupt( CYGNUM_HAL_SMP_CPU_INTERRUPT_VECTOR( HAL_SMP_CPU_THIS() ),
                       0,
                       0,
                       cyg_hal_cpu_message_isr,
                       cyg_hal_cpu_message_dsr
                     );

    intr->set_cpu( intr->get_vector(), HAL_SMP_CPU_THIS() );

    intr->attach();

    intr->unmask_interrupt( intr->get_vector() );

#endif

    // Get the first thread to run from scheduler
    register Cyg_Thread *next = scheduler.schedule();

    CYG_ASSERTCLASS( next, "Bad initial thread" );

    clear_need_reschedule();            // finished rescheduling
    set_current_thread(next);           // set current thread pointer

#ifdef CYGVAR_KERNEL_COUNTERS_CLOCK
    // Reference the real time clock. This ensures that at least one
    // reference to the kernel_clock.o object exists, without which
    // the object will not be included while linking.
    CYG_REFERENCE_OBJECT( Cyg_Clock::real_time_clock );
#endif

    // Load the first thread. This will also enable interrupts since
    // the initial state of all threads is to have interrupts enabled.

    HAL_THREAD_LOAD_CONTEXT( &next->stack_ptr );

}

// -------------------------------------------------------------------------
// SMP support functions

#ifdef CYGPKG_KERNEL_SMP_SUPPORT

// This is called on each secondary CPU on its interrupt stack after
// the initial CPU has initialized the world.

externC void cyg_kernel_smp_startup()
{
    CYG_INSTRUMENT_SMP( CPU_START, CYG_KERNEL_CPU_THIS(), 0 );
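    // Take the scheduler lock before entering start_cpu(): the
    // scheduler is entered with the lock claimed (the boot CPU starts
    // with sched_lock at 1, as noted at the top of this file).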
    Cyg_Scheduler::lock();
    Cyg_Scheduler::start_cpu();
}

// This is called from the DSR of the inter-CPU interrupt to cause a
// reschedule when the scheduler lock is zeroed.

__externC void cyg_scheduler_set_need_reschedule()
{
    CYG_INSTRUMENT_SMP( RESCHED_RECV, 0, 0 );
    Cyg_Scheduler::need_reschedule[HAL_SMP_CPU_THIS()] = true;
}

#endif

// -------------------------------------------------------------------------
// Consistency checker

#ifdef CYGDBG_USE_ASSERTS

cyg_bool Cyg_Scheduler::check_this( cyg_assert_class_zeal zeal) const
{
    CYG_REPORT_FUNCTION();

    // check that we have a non-NULL pointer first
    if( this == NULL ) return false;

    switch( zeal )
    {
    case cyg_system_test:
    case cyg_extreme:
    case cyg_thorough:
        if( !get_current_thread()->check_this(zeal) ) return false;
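        // deliberate fall-through: milder zeal levels check nothing extra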
    case cyg_quick:
    case cyg_trivial:
    case cyg_none:
    default:
        break;
    }

    return true;
}

#endif

//==========================================================================
// SchedThread members

// -------------------------------------------------------------------------
// Static data members

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

# ifdef CYGSEM_KERNEL_SCHED_ASR_GLOBAL
Cyg_ASR *Cyg_SchedThread::asr = &Cyg_SchedThread::asr_default;
# endif

# ifdef CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL
CYG_ADDRWORD Cyg_SchedThread::asr_data = 0;
# endif

#endif // CYGSEM_KERNEL_SCHED_ASR_SUPPORT

// -------------------------------------------------------------------------
// Constructor

Cyg_SchedThread::Cyg_SchedThread(Cyg_Thread *thread, CYG_ADDRWORD sched_info)
: Cyg_SchedThread_Implementation(sched_info)
{
    CYG_REPORT_FUNCTION();

    queue = NULL;

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL

    mutex_count = 0;

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE

    priority_inherited = false;

#endif
#endif

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

    asr_inhibit = 0;
    asr_pending = false;

#ifndef CYGSEM_KERNEL_SCHED_ASR_GLOBAL
    asr = asr_default;
#endif
#ifdef CYGSEM_KERNEL_SCHED_ASR_DATA_GLOBAL
    asr_data = 0;
#endif

#endif
}

// -------------------------------------------------------------------------
// ASR support functions

#ifdef CYGSEM_KERNEL_SCHED_ASR_SUPPORT

// -------------------------------------------------------------------------
// Set ASR
// Install a new ASR, returning the old one.

void Cyg_SchedThread::set_asr( Cyg_ASR  *new_asr, CYG_ADDRWORD  new_data,
                  Cyg_ASR **old_asr, CYG_ADDRWORD *old_data)
{
    CYG_REPORT_FUNCTION();

    // Do this with the scheduler locked...
    Cyg_Scheduler::lock();

    if( old_asr != NULL ) *old_asr = asr;
    if( old_data != NULL ) *old_data = asr_data;

    // If new_asr is NULL, do not change the ASR,
    // but only change the data.
    if( new_asr != NULL ) asr = new_asr;
    asr_data = new_data;

    Cyg_Scheduler::unlock();
}
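
// For illustration only (not from the original source): a typical use
// of the ASR hooks, with hypothetical handler and context names.
//
//     static void my_asr( CYG_ADDRWORD data )
//     {
//         // runs as close to "user" state as possible,
//         // see unlock_inner() above
//     }
//
//     Cyg_ASR      *old_asr;
//     CYG_ADDRWORD  old_data;
//     thread->set_asr( my_asr, (CYG_ADDRWORD)&my_context,
//                      &old_asr, &old_data );
//     ...
//     thread->clear_asr();     // back to asr_default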

// -------------------------------------------------------------------------
// Clear ASR

void Cyg_SchedThread::clear_asr()
{
    CYG_REPORT_FUNCTION();

    // Do this with the scheduler locked...
    Cyg_Scheduler::lock();

    // Reset ASR to default.
    asr = asr_default;
    asr_data = 0;

    Cyg_Scheduler::unlock();
}

// -------------------------------------------------------------------------
// Default ASR function.
// Having this function avoids ever having to worry about seeing a
// NULL pointer as the ASR function.

void Cyg_SchedThread::asr_default(CYG_ADDRWORD data)
{
    CYG_REPORT_FUNCTION();

    data = data;        // silence unused-argument warnings
    return;
}

#endif

// -------------------------------------------------------------------------
// Generic priority protocol support

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL

void Cyg_SchedThread::set_inherited_priority( cyg_priority pri, Cyg_Thread *thread )
{
    CYG_REPORT_FUNCTION();

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE

    // This is the common code for priority inheritance and ceiling
    // protocols. This implementation provides a simplified version of
    // the protocol.

    Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
                                         Cyg_SchedThread,
                                         this);

    CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");

    // Compare with *current* priority in case thread has already
    // inherited - for relay case below.
    if( pri < priority )
    {
        cyg_priority mypri = priority;
        cyg_bool already_inherited = priority_inherited;

        // If this is the first inheritance, copy the old priority
        // and set the inherited flag. We clear it before setting the
        // priority since set_priority() is inheritance aware.
        // This is called with the scheduler locked, so there are no
        // race conditions.

        priority_inherited = false;     // so that set_priority() DTRT

        self->set_priority( pri );

        if( !already_inherited )
            original_priority = mypri;

        priority_inherited = true;      // regardless, because it is now

    }

#endif
}
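
// For illustration only (not from the original source): a worked
// example of the simple protocol above, remembering that in eCos a
// numerically lower priority value means a higher priority. Suppose a
// thread at priority 10 owns a mutex and a priority-3 thread blocks
// on it. The owner's set_inherited_priority(3, ...) sees 3 < 10, so
// it records original_priority = 10, runs at priority 3, and sets
// priority_inherited. A later waiter at priority 7 changes nothing,
// since 7 < 3 is false.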

void Cyg_SchedThread::relay_inherited_priority( Cyg_Thread *ex_owner, Cyg_ThreadQueue *pqueue)
{
    CYG_REPORT_FUNCTION();

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE

    // A simple implementation of priority inheritance.
    // At its simplest, this member does nothing.

    // If there is anyone else waiting, then the *new* owner inherits from
    // the current one, since that is the maximum of the others waiting.
    // (It's worth not doing if there's nobody waiting, to prevent
    // unnecessary priority skew.)  This could be viewed as a discovered
    // priority ceiling.

    if ( !pqueue->empty() )
        set_inherited_priority( ex_owner->get_current_priority(), ex_owner );

#endif
}

void Cyg_SchedThread::clear_inherited_priority()
{
    CYG_REPORT_FUNCTION();

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_SIMPLE

    // A simple implementation of priority inheritance/ceiling
    // protocols.  The simplification in this algorithm is that we do
    // not reduce our priority until we have freed all mutexes
    // claimed. Hence we can continue to run at an artificially high
    // priority even when we should not.  However, since nested
    // mutexes are rare, the thread we have inherited from is likely
    // to be locking the same mutexes we are, and mutex claim periods
    // should be very short, the performance difference between this
    // and a more complex algorithm should be negligible. The most
    // important advantage of this algorithm is that it is fast and
    // deterministic.
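    //
    // Concretely (an illustrative example, not from the original
    // source): a priority-10 thread takes mutexes M1 then M2 and
    // inherits priority 3 through M1. When it releases M1 its
    // mutex_count is still 1, so it keeps running at 3; only when it
    // releases M2 (mutex_count reaches 0) does it drop back to 10.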

    Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
                                         Cyg_SchedThread,
                                         this);

    CYG_ASSERT( mutex_count >= 0, "Negative mutex count");

    if( mutex_count == 0 && priority_inherited )
    {
        priority_inherited = false;

        // Only make an effort if the priority must change
        if( priority < original_priority )
            self->set_priority( original_priority );

    }

#endif
}

#endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL

// -------------------------------------------------------------------------
// Priority inheritance support.

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_INHERIT

// -------------------------------------------------------------------------
// Inherit the priority of the provided thread if it
// has a higher priority than ours.

void Cyg_SchedThread::inherit_priority( Cyg_Thread *thread)
{
    CYG_REPORT_FUNCTION();

    Cyg_Thread *self = CYG_CLASSFROMBASE(Cyg_Thread,
                                         Cyg_SchedThread,
                                         this);

    CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");
    CYG_ASSERT( self != thread, "Trying to inherit from self!");

    self->set_inherited_priority( thread->get_current_priority(), thread );

}

// -------------------------------------------------------------------------
// Inherit the priority of the ex-owner thread or from the queue if it
// has a higher priority than ours.

void Cyg_SchedThread::relay_priority( Cyg_Thread *ex_owner, Cyg_ThreadQueue *pqueue)
{
    CYG_REPORT_FUNCTION();

    relay_inherited_priority( ex_owner, pqueue );
}

// -------------------------------------------------------------------------
// Lose a priority inheritance

void Cyg_SchedThread::disinherit_priority()
{
    CYG_REPORT_FUNCTION();

    CYG_ASSERT( mutex_count >= 0, "Negative mutex count");

    clear_inherited_priority();
}

#endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_INHERIT

// -------------------------------------------------------------------------
// Priority ceiling support

#ifdef CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_CEILING

void Cyg_SchedThread::set_priority_ceiling( cyg_priority pri )
{
    CYG_REPORT_FUNCTION();

    CYG_ASSERT( mutex_count > 0, "Non-positive mutex count");

    set_inherited_priority( pri );

}

void Cyg_SchedThread::clear_priority_ceiling( )
{
    CYG_REPORT_FUNCTION();

    CYG_ASSERT( mutex_count >= 0, "Negative mutex count");

    clear_inherited_priority();
}

#endif // CYGSEM_KERNEL_SYNCH_MUTEX_PRIORITY_INVERSION_PROTOCOL_CEILING

// -------------------------------------------------------------------------
// EOF sched/sched.cxx