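/*!\file sched.h
 * \brief Scheduler interface header.
 */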
#ifndef _XENO_NUCLEUS_SCHED_H
#define _XENO_NUCLEUS_SCHED_H

#include <nucleus/thread.h>

#if defined(__KERNEL__) || defined(__XENO_SIM__)

#include <nucleus/schedqueue.h>
#include <nucleus/sched-tp.h>
#include <nucleus/sched-sporadic.h>
#include <nucleus/vfile.h>

/* Sched status flags */
#define XNKCOUT		0x80000000	/* Sched callout context */
#define XNINTCK		0x40000000	/* In master tick handler context */
#define XNINSW		0x20000000	/* In context switch */
#define XNRESCHED	0x10000000	/* Needs rescheduling */

/* Sched local flags */
#define XNHTICK		0x00008000	/* Host tick pending */
#define XNINIRQ		0x00004000	/* In IRQ handling context */
#define XNHDEFER	0x00002000	/* Host tick deferred */
#define XNINLOCK	0x00001000	/* Scheduler locked */

/* Sched RPI status flags */
#define XNRPICK		0x80000000	/* Check RPI state */

struct xnsched_rt {
	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
#ifdef CONFIG_XENO_OPT_PRIOCPL
	xnsched_queue_t relaxed;	/*!< Relaxed thread queue (RPI). */
#endif /* CONFIG_XENO_OPT_PRIOCPL */
};

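/*!
 * \brief Scheduling information structure.
 */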
typedef struct xnsched {

	xnflags_t status;		/*!< Scheduler specific status bitmask. */
	xnflags_t lflags;		/*!< Scheduler specific local flags bitmask. */
	int cpu;			/*!< CPU this scheduler runs on. */
	struct xnthread *curr;		/*!< Current thread. */
#ifdef CONFIG_SMP
	xnarch_cpumask_t resched;	/*!< Mask of CPUs needing rescheduling. */
#endif

	struct xnsched_rt rt;		/*!< Context of built-in real-time class. */
#ifdef CONFIG_XENO_OPT_SCHED_TP
	struct xnsched_tp tp;		/*!< Context of TP class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	struct xnsched_sporadic pss;	/*!< Context of sporadic scheduling class. */
#endif

	xntimerq_t timerqueue;		/*!< Core timer queue. */
	volatile unsigned inesting;	/*!< Interrupt nesting level. */
	struct xntimer htimer;		/*!< Host timer. */
	struct xnthread *zombie;	/*!< Exited thread pending finalization, if any. */
	struct xnthread rootcb;		/*!< Root thread control block. */
#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
	struct xnthread *last;		/*!< Last switched-out thread. */
#endif

#ifdef CONFIG_XENO_HW_FPU
	struct xnthread *fpuholder;	/*!< Thread owning the current FPU context. */
#endif

#ifdef CONFIG_XENO_OPT_WATCHDOG
	struct xntimer wdtimer;		/*!< Watchdog timer object. */
	int wdcount;			/*!< Watchdog tick count. */
#endif

#ifdef CONFIG_XENO_OPT_STATS
	xnticks_t last_account_switch;	/*!< Last account switch date (ticks). */
	xnstat_exectime_t *current_account;	/*!< Currently active account. */
#endif

#ifdef CONFIG_XENO_OPT_PRIOCPL
	DECLARE_XNLOCK(rpilock);	/*!< RPI list lock. */
	xnflags_t rpistatus;		/*!< RPI specific status flags. */
#endif

#ifdef CONFIG_XENO_OPT_PERVASIVE
	struct task_struct *gatekeeper;	/*!< Gatekeeper kernel thread. */
	struct semaphore gksync;	/*!< Gatekeeper synchronization. */
	struct xnthread *gktarget;	/*!< Thread awaiting migration to primary mode. */
#endif

} xnsched_t;

union xnsched_policy_param;

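/*
 * A scheduling class descriptor. Classes are linked through the
 * ->next field; the class weight determines its precedence over
 * lower-weighted ones when comparing thread priorities.
 */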
struct xnsched_class {

	void (*sched_init)(struct xnsched *sched);
	void (*sched_enqueue)(struct xnthread *thread);
	void (*sched_dequeue)(struct xnthread *thread);
	void (*sched_requeue)(struct xnthread *thread);
	struct xnthread *(*sched_pick)(struct xnsched *sched);
	void (*sched_tick)(struct xnthread *curr);
	void (*sched_rotate)(struct xnsched *sched,
			     const union xnsched_policy_param *p);
	void (*sched_migrate)(struct xnthread *thread,
			      struct xnsched *sched);
	void (*sched_setparam)(struct xnthread *thread,
			       const union xnsched_policy_param *p);
	void (*sched_getparam)(struct xnthread *thread,
			       union xnsched_policy_param *p);
	void (*sched_trackprio)(struct xnthread *thread,
				const union xnsched_policy_param *p);
	int (*sched_declare)(struct xnthread *thread,
			     const union xnsched_policy_param *p);
	void (*sched_forget)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_PRIOCPL
	struct xnthread *(*sched_push_rpi)(struct xnsched *sched,
					   struct xnthread *thread);
	void (*sched_pop_rpi)(struct xnthread *thread);
	struct xnthread *(*sched_peek_rpi)(struct xnsched *sched);
	void (*sched_suspend_rpi)(struct xnthread *thread);
	void (*sched_resume_rpi)(struct xnthread *thread);
#endif /* CONFIG_XENO_OPT_PRIOCPL */
#ifdef CONFIG_XENO_OPT_VFILE
	int (*sched_init_vfile)(struct xnsched_class *schedclass,
				struct xnvfile_directory *vfroot);
	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif /* CONFIG_XENO_OPT_VFILE */
	int nthreads;			/* Number of threads attached to this class. */
	struct xnsched_class *next;	/* Next class in the chain. */
	int weight;			/* Weight for cross-class priority comparison. */
	const char *name;		/* Class name. */
};

#define XNSCHED_CLASS_MAX_PRIO	1024
#define XNSCHED_CLASS_WEIGHT(n)	((n) * XNSCHED_CLASS_MAX_PRIO)
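
/*
 * Illustration only: a hypothetical scheduling class could be
 * declared along these lines (handler names below are made up; the
 * real built-in classes live in sched-rt.c, sched-tp.c and
 * sched-sporadic.c):
 *
 *	struct xnsched_class xnsched_class_foo = {
 *		.sched_init	= foo_init,
 *		.sched_enqueue	= foo_enqueue,
 *		.sched_dequeue	= foo_dequeue,
 *		.sched_requeue	= foo_requeue,
 *		.sched_pick	= foo_pick,
 *		.sched_setparam	= foo_setparam,
 *		.sched_getparam	= foo_getparam,
 *		.sched_trackprio = foo_trackprio,
 *		.weight		= XNSCHED_CLASS_WEIGHT(2),
 *		.name		= "foo",
 *	};
 */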

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO		0x80000000

#ifdef CONFIG_SMP
#define xnsched_cpu(__sched__)	((__sched__)->cpu)
#else /* !CONFIG_SMP */
#define xnsched_cpu(__sched__)	({ (void)(__sched__); 0; })
#endif /* CONFIG_SMP */

/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
	return testbits(sched->status, XNRESCHED);
}

/* Set self resched flag for the given scheduler. */
#define xnsched_set_self_resched(__sched__) do {			\
	XENO_BUGON(NUCLEUS, (__sched__) != xnpod_current_sched());	\
	__setbits((__sched__)->status, XNRESCHED);			\
} while (0)

/* Set specific resched flag into the local scheduler mask. */
#ifdef CONFIG_SMP
#define xnsched_set_resched(__sched__) do {				\
	xnsched_t *current_sched = xnpod_current_sched();		\
	if (current_sched == (__sched__))				\
		__setbits(current_sched->status, XNRESCHED);		\
	else if (!xnsched_resched_p(__sched__)) {			\
		xnarch_cpu_set(xnsched_cpu(__sched__), current_sched->resched); \
		__setbits((__sched__)->status, XNRESCHED);		\
		__setbits(current_sched->status, XNRESCHED);		\
	}								\
} while (0)
#else /* !CONFIG_SMP */
#define xnsched_set_resched	xnsched_set_self_resched
#endif /* CONFIG_SMP */

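/*
 * Zombie threads have exited but still await finalization; cleanup
 * runs on behalf of the thread which switches in next, see
 * xnsched_finalize_zombie().
 */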
void xnsched_zombie_hooks(struct xnthread *thread);

void __xnsched_finalize_zombie(struct xnsched *sched);

static inline void xnsched_finalize_zombie(struct xnsched *sched)
{
	if (sched->zombie)
		__xnsched_finalize_zombie(sched);
}

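/*
 * With CONFIG_XENO_HW_UNLOCKED_SWITCH, context switches run with
 * hardware interrupts enabled, so the scheduler state has to be
 * re-read and a pending rescheduling request honored once the switch
 * has fully completed.
 */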
#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch()	xnpod_schedule()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return testbits(sched->status, XNRESCHED);
}

#else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_SMP
#define xnsched_finish_unlocked_switch(__sched__)	\
	({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());	\
	   xnpod_current_sched(); })
#else /* !CONFIG_SMP */
#define xnsched_finish_unlocked_switch(__sched__)	\
	({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());	\
	   (__sched__); })
#endif /* CONFIG_SMP */

#define xnsched_resched_after_unlocked_switch()	do { } while (0)

#define xnsched_maybe_resched_after_unlocked_switch(sched)	\
	({ (void)(sched); 0; })

#endif /* CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
	sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
		       struct xnsched_class *sched_class,
		       const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
			  struct xnthread *target);

void xnsched_migrate(struct xnthread *thread,
		     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
			     struct xnsched *sched);

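/*!
 * \brief Rotate a scheduler runqueue.
 *
 * The specified scheduling class is requested to rotate its runqueue
 * for the given scheduler, moving the thread leading the queue to
 * the end of its priority group, so that equal-priority threads
 * share the CPU in a round-robin fashion.
 */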
static inline void xnsched_rotate(struct xnsched *sched,
				  struct xnsched_class *sched_class,
				  const union xnsched_policy_param *sched_param)
{
	sched_class->sched_rotate(sched, sched_param);
}

static inline int xnsched_init_tcb(struct xnthread *thread)
{
	int ret = 0;

	xnsched_idle_init_tcb(thread);
	xnsched_rt_init_tcb(thread);
#ifdef CONFIG_XENO_OPT_SCHED_TP
	ret = xnsched_tp_init_tcb(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	ret = xnsched_sporadic_init_tcb(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
	return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
	return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
	return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnthread *curr, struct xntbase *tbase)
{
	struct xnsched_class *sched_class = curr->sched_class;
	/*
	 * A thread that undergoes round-robin scheduling only
	 * consumes its time slice when it runs within its own
	 * scheduling class, which excludes temporary PIP boosts, and
	 * does not hold the scheduler lock.
	 */
	if (xnthread_time_base(curr) == tbase &&
	    sched_class != &xnsched_class_idle &&
	    sched_class == curr->base_class &&
	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == XNRRB)
		sched_class->sched_tick(curr);
}

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

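/*
 * With extra scheduling classes compiled in, the helpers below
 * dispatch through the handler table of the thread's scheduling
 * class.
 */
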
static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
	return thread->bprio + thread->sched_class->weight;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
	return thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	thread->sched_class->sched_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	thread->sched_class->sched_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	--sched_class->nthreads;

	if (sched_class->sched_forget)
		sched_class->sched_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
						struct xnthread *thread)
{
	return thread->sched_class->sched_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
	thread->sched_class->sched_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class->sched_suspend_rpi)
		sched_class->sched_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class->sched_resume_rpi)
		sched_class->sched_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline common helpers for dealing with those.
 */

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
	return thread->bprio;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
	return thread->cprio;
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_setparam(thread, p);
	else
		__xnsched_idle_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_getparam(thread, p);
	else
		__xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_trackprio(thread, p);
	else
		__xnsched_idle_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
	--thread->base_class->nthreads;
	__xnsched_rt_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
						struct xnthread *thread)
{
	return __xnsched_rt_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
	__xnsched_rt_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
	__xnsched_rt_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
	__xnsched_rt_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */

void xnsched_renice_root(struct xnsched *sched,
			 struct xnthread *target);

struct xnthread *xnsched_peek_rpi(struct xnsched *sched);

#else /* !(__KERNEL__ || __XENO_SIM__) */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

#endif /* __KERNEL__ || __XENO_SIM__ */

#endif /* !_XENO_NUCLEUS_SCHED_H */