2006-11-11 11:27:52 -05:00
|
|
|
/*-
|
|
|
|
|
* Copyright (c) 1996, 1997
|
|
|
|
|
* HD Associates, Inc. All rights reserved.
|
|
|
|
|
*
|
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
|
* are met:
|
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
|
* This product includes software developed by HD Associates, Inc
|
|
|
|
|
* and Jukka Antero Ukkonen.
|
|
|
|
|
* 4. Neither the name of the author nor the names of any co-contributors
|
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
|
* without specific prior written permission.
|
|
|
|
|
*
|
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY HD ASSOCIATES AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL HD ASSOCIATES OR CONTRIBUTORS BE LIABLE
|
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
|
*/
|
|
|
|
|
|
2002-10-12 01:32:24 -04:00
|
|
|
/*-
|
2008-03-02 02:19:35 -05:00
|
|
|
* Copyright (c) 2002-2008, Jeffrey Roberson <jeff@freebsd.org>
|
2002-10-12 01:32:24 -04:00
|
|
|
* All rights reserved.
|
|
|
|
|
*
|
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
|
* are met:
|
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
|
* notice unmodified, this list of conditions, and the following
|
|
|
|
|
* disclaimer.
|
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
|
*
|
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
*
|
|
|
|
|
* $FreeBSD$
|
|
|
|
|
*/
|
|
|
|
|
|
2006-11-11 11:27:52 -05:00
|
|
|
#ifndef _SCHED_H_
|
|
|
|
|
#define _SCHED_H_
|
2002-10-12 01:32:24 -04:00
|
|
|
|
2006-11-11 11:27:52 -05:00
|
|
|
#ifdef _KERNEL
|
2002-10-12 01:32:24 -04:00
|
|
|
/*
|
|
|
|
|
* General scheduling info.
|
2004-01-31 21:44:35 -05:00
|
|
|
*
|
|
|
|
|
* sched_load:
|
|
|
|
|
* Total runnable non-ithread threads in the system.
|
|
|
|
|
*
|
|
|
|
|
* sched_runnable:
|
|
|
|
|
* Runnable threads for this processor.
|
2002-10-12 01:32:24 -04:00
|
|
|
*/
|
2004-01-31 21:44:35 -05:00
|
|
|
int sched_load(void);
|
2002-10-12 01:32:24 -04:00
|
|
|
int sched_rr_interval(void);
|
|
|
|
|
int sched_runnable(void);
|
|
|
|
|
|
2003-04-10 23:39:07 -04:00
|
|
|
/*
|
|
|
|
|
* Proc related scheduling hooks.
|
|
|
|
|
*/
|
2004-07-18 19:36:13 -04:00
|
|
|
void sched_exit(struct proc *p, struct thread *childtd);
|
2004-09-04 22:09:54 -04:00
|
|
|
void sched_fork(struct thread *td, struct thread *childtd);
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 19:50:30 -04:00
|
|
|
void sched_fork_exit(struct thread *td);
|
2006-10-26 17:42:22 -04:00
|
|
|
void sched_class(struct thread *td, int class);
|
2004-06-15 20:26:31 -04:00
|
|
|
void sched_nice(struct proc *p, int nice);
|
2002-10-12 01:32:24 -04:00
|
|
|
|
|
|
|
|
/*
|
2003-11-04 02:18:18 -05:00
|
|
|
* Threads are switched in and out, block on resources, have temporary
|
2006-12-06 01:34:57 -05:00
|
|
|
* priorities inherited from their procs, and use up cpu time.
|
2002-10-12 01:32:24 -04:00
|
|
|
*/
|
2003-04-10 23:39:07 -04:00
|
|
|
void sched_exit_thread(struct thread *td, struct thread *child);
|
|
|
|
|
void sched_fork_thread(struct thread *td, struct thread *child);
|
2006-08-25 02:12:53 -04:00
|
|
|
void sched_lend_prio(struct thread *td, u_char prio);
|
|
|
|
|
void sched_lend_user_prio(struct thread *td, u_char pri);
|
2003-11-04 02:18:18 -05:00
|
|
|
fixpt_t sched_pctcpu(struct thread *td);
|
2003-04-10 23:39:07 -04:00
|
|
|
void sched_prio(struct thread *td, u_char prio);
|
2008-03-12 02:31:06 -04:00
|
|
|
void sched_sleep(struct thread *td, int prio);
|
2004-09-10 17:04:38 -04:00
|
|
|
void sched_switch(struct thread *td, struct thread *newtd, int flags);
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 19:50:30 -04:00
|
|
|
void sched_throw(struct thread *td);
|
Rework the interface between priority propagation (lending) and the
schedulers a bit to ensure more correct handling of priorities and fewer
priority inversions:
- Add two functions to the sched(9) API to handle priority lending:
sched_lend_prio() and sched_unlend_prio(). The turnstile code uses these
functions to ask the scheduler to lend a thread a set priority and to
tell the scheduler when it thinks it is ok for a thread to stop borrowing
priority. The unlend case is slightly complex in that the turnstile code
tells the scheduler what the minimum priority of the thread needs to be
to satisfy the requirements of any other threads blocked on locks owned
by the thread in question. The scheduler then decides where the thread
can go back to normal mode (if it's normal priority is high enough to
satisfy the pending lock requests) or it it should continue to use the
priority specified to the sched_unlend_prio() call. This involves adding
a new per-thread flag TDF_BORROWING that replaces the ULE-only kse flag
for priority elevation.
- Schedulers now refuse to lower the priority of a thread that is currently
borrowing another therad's priority.
- If a scheduler changes the priority of a thread that is currently sitting
on a turnstile, it will call a new function turnstile_adjust() to inform
the turnstile code of the change. This function resorts the thread on
the priority list of the turnstile if needed, and if the thread ends up
at the head of the list (due to having the highest priority) and its
priority was raised, then it will propagate that new priority to the
owner of the lock it is blocked on.
Some additional fixes specific to the 4BSD scheduler include:
- Common code for updating the priority of a thread when the user priority
of its associated kse group has been consolidated in a new static
function resetpriority_thread(). One change to this function is that
it will now only adjust the priority of a thread if it already has a
time sharing priority, thus preserving any boosts from a tsleep() until
the thread returns to userland. Also, resetpriority() no longer calls
maybe_resched() on each thread in the group. Instead, the code calling
resetpriority() is responsible for calling resetpriority_thread() on
any threads that need to be updated.
- schedcpu() now uses resetpriority_thread() instead of just calling
sched_prio() directly after it updates a kse group's user priority.
- sched_clock() now uses resetpriority_thread() rather than writing
directly to td_priority.
- sched_nice() now updates all the priorities of the threads after the
group priority has been adjusted.
Discussed with: bde
Reviewed by: ups, jeffr
Tested on: 4bsd, ule
Tested on: i386, alpha, sparc64
2004-12-30 15:52:44 -05:00
|
|
|
void sched_unlend_prio(struct thread *td, u_char prio);
|
2006-10-26 17:42:22 -04:00
|
|
|
void sched_user_prio(struct thread *td, u_char prio);
|
2003-04-10 23:39:07 -04:00
|
|
|
void sched_userret(struct thread *td);
|
2002-10-12 01:32:24 -04:00
|
|
|
void sched_wakeup(struct thread *td);
|
2008-03-09 21:30:35 -04:00
|
|
|
void sched_preempt(struct thread *td);
|
2002-10-12 01:32:24 -04:00
|
|
|
|
|
|
|
|
/*
|
2003-11-04 02:18:18 -05:00
|
|
|
* Threads are moved on and off of run queues
|
2002-10-12 01:32:24 -04:00
|
|
|
*/
|
2004-08-31 22:11:28 -04:00
|
|
|
void sched_add(struct thread *td, int flags);
|
2003-10-16 04:39:15 -04:00
|
|
|
void sched_clock(struct thread *td);
|
|
|
|
|
void sched_rem(struct thread *td);
|
Refactor timer management code with priority to one-shot operation mode.
The main goal of this is to generate timer interrupts only when there is
some work to do. When CPU is busy interrupts are generating at full rate
of hz + stathz to fullfill scheduler and timekeeping requirements. But
when CPU is idle, only minimum set of interrupts (down to 8 interrupts per
second per CPU now), needed to handle scheduled callouts is executed.
This allows significantly increase idle CPU sleep time, increasing effect
of static power-saving technologies. Also it should reduce host CPU load
on virtualized systems, when guest system is idle.
There is set of tunables, also available as writable sysctls, allowing to
control wanted event timer subsystem behavior:
kern.eventtimer.timer - allows to choose event timer hardware to use.
On x86 there is up to 4 different kinds of timers. Depending on whether
chosen timer is per-CPU, behavior of other options slightly differs.
kern.eventtimer.periodic - allows to choose periodic and one-shot
operation mode. In periodic mode, current timer hardware taken as the only
source of time for time events. This mode is quite alike to previous kernel
behavior. One-shot mode instead uses currently selected time counter
hardware to schedule all needed events one by one and program timer to
generate interrupt exactly in specified time. Default value depends of
chosen timer capabilities, but one-shot mode is preferred, until other is
forced by user or hardware.
kern.eventtimer.singlemul - in periodic mode specifies how much times
higher timer frequency should be, to not strictly alias hardclock() and
statclock() events. Default values are 2 and 4, but could be reduced to 1
if extra interrupts are unwanted.
kern.eventtimer.idletick - makes each CPU to receive every timer interrupt
independently of whether they busy or not. By default this options is
disabled. If chosen timer is per-CPU and runs in periodic mode, this option
has no effect - all interrupts are generating.
As soon as this patch modifies cpu_idle() on some platforms, I have also
refactored one on x86. Now it makes use of MONITOR/MWAIT instrunctions
(if supported) under high sleep/wakeup rate, as fast alternative to other
methods. It allows SMP scheduler to wake up sleeping CPUs much faster
without using IPI, significantly increasing performance on some highly
task-switching loads.
Tested by: many (on i386, amd64, sparc64 and powerc)
H/W donated by: Gheorghe Ardelean
Sponsored by: iXsystems, Inc.
2010-09-13 03:25:35 -04:00
|
|
|
void sched_tick(int cnt);
|
2006-06-15 02:37:39 -04:00
|
|
|
void sched_relinquish(struct thread *td);
|
2007-01-23 03:46:51 -05:00
|
|
|
struct thread *sched_choose(void);
|
|
|
|
|
void sched_idletd(void *);
|
2002-10-12 01:32:24 -04:00
|
|
|
|
2002-11-21 04:30:55 -05:00
|
|
|
/*
|
2003-11-04 02:18:18 -05:00
|
|
|
* Binding makes cpu affinity permanent while pinning is used to temporarily
|
|
|
|
|
* hold a thread on a particular CPU.
|
2002-11-21 04:30:55 -05:00
|
|
|
*/
|
2003-11-04 02:18:18 -05:00
|
|
|
void sched_bind(struct thread *td, int cpu);
|
2004-09-11 06:07:22 -04:00
|
|
|
static __inline void sched_pin(void);
|
2003-11-04 02:18:18 -05:00
|
|
|
void sched_unbind(struct thread *td);
|
2004-09-11 06:07:22 -04:00
|
|
|
static __inline void sched_unpin(void);
|
2005-04-19 00:01:25 -04:00
|
|
|
int sched_is_bound(struct thread *td);
|
2008-03-02 02:19:35 -05:00
|
|
|
void sched_affinity(struct thread *td);
|
2003-11-04 02:18:18 -05:00
|
|
|
|
2002-11-20 20:22:38 -05:00
|
|
|
/*
|
|
|
|
|
* These procedures tell the process data structure allocation code how
|
|
|
|
|
* many bytes to actually allocate.
|
|
|
|
|
*/
|
|
|
|
|
int sched_sizeof_proc(void);
|
|
|
|
|
int sched_sizeof_thread(void);
|
|
|
|
|
|
2009-01-17 02:17:57 -05:00
|
|
|
/*
|
|
|
|
|
* This routine provides a consistent thread name for use with KTR graphing
|
|
|
|
|
* functions.
|
|
|
|
|
*/
|
|
|
|
|
char *sched_tdname(struct thread *td);
|
|
|
|
|
|
2004-09-11 06:07:22 -04:00
|
|
|
/*
 * Temporarily hold (pin) the current thread on the CPU it is running on;
 * see the binding/pinning comment above.  Pins nest: td_pinned is a
 * counter, so each sched_pin() must be paired with a sched_unpin().
 */
static __inline void
sched_pin(void)
{
	curthread->td_pinned++;
}
|
|
|
|
|
|
|
|
|
|
/*
 * Drop one level of CPU pinning previously taken with sched_pin().
 * The thread becomes migratable again only when td_pinned reaches 0.
 */
static __inline void
sched_unpin(void)
{
	curthread->td_pinned--;
}
|
|
|
|
|
|
2007-01-23 03:46:51 -05:00
|
|
|
/* sched_add arguments (formerly setrunqueue) */
|
|
|
|
|
#define SRQ_BORING 0x0000 /* No special circumstances. */
|
|
|
|
|
#define SRQ_YIELDING 0x0001 /* We are yielding (from mi_switch). */
|
|
|
|
|
#define SRQ_OURSELF 0x0002 /* It is ourself (from mi_switch). */
|
|
|
|
|
#define SRQ_INTR 0x0004 /* It is probably urgent. */
|
|
|
|
|
#define SRQ_PREEMPTED 0x0008 /* has been preempted.. be kind */
|
|
|
|
|
#define SRQ_BORROWING 0x0010 /* Priority updated due to prio_lend */
|
|
|
|
|
|
2008-04-17 00:20:10 -04:00
|
|
|
/* Scheduler stats. */
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 19:50:30 -04:00
|
|
|
#ifdef SCHED_STATS
|
2009-06-24 21:33:51 -04:00
|
|
|
DPCPU_DECLARE(long, sched_switch_stats[SWT_COUNT]);
|
2008-04-17 00:20:10 -04:00
|
|
|
|
|
|
|
|
#define SCHED_STAT_DEFINE_VAR(name, ptr, descr) \
|
2009-06-24 21:33:51 -04:00
|
|
|
static void name ## _add_proc(void *dummy __unused) \
|
|
|
|
|
{ \
|
|
|
|
|
\
|
|
|
|
|
SYSCTL_ADD_PROC(NULL, \
|
|
|
|
|
SYSCTL_STATIC_CHILDREN(_kern_sched_stats), OID_AUTO, \
|
|
|
|
|
#name, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE, \
|
|
|
|
|
ptr, 0, sysctl_dpcpu_long, "LU", descr); \
|
|
|
|
|
} \
|
2010-09-30 13:05:23 -04:00
|
|
|
SYSINIT(name, SI_SUB_RUN_SCHEDULER, SI_ORDER_MIDDLE, name ## _add_proc, NULL);
|
2009-06-24 21:33:51 -04:00
|
|
|
|
2008-04-17 00:20:10 -04:00
|
|
|
#define SCHED_STAT_DEFINE(name, descr) \
|
2009-06-24 21:33:51 -04:00
|
|
|
DPCPU_DEFINE(unsigned long, name); \
|
|
|
|
|
SCHED_STAT_DEFINE_VAR(name, &DPCPU_NAME(name), descr)
|
|
|
|
|
/*
|
|
|
|
|
* Sched stats are always incremented in critical sections so no atomic
|
|
|
|
|
 * is necessary to increment them.
|
|
|
|
|
*/
|
|
|
|
|
#define SCHED_STAT_INC(var) DPCPU_GET(var)++;
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 19:50:30 -04:00
|
|
|
#else
|
2008-04-17 00:20:10 -04:00
|
|
|
/* No-op stubs when SCHED_STATS is not compiled in; parameter order kept
 * consistent with the SCHED_STATS definition: (name, ptr, descr). */
#define SCHED_STAT_DEFINE_VAR(name, ptr, descr)
|
|
|
|
|
#define SCHED_STAT_DEFINE(name, descr)
|
2009-06-21 05:01:12 -04:00
|
|
|
#define SCHED_STAT_INC(var) (void)0
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 19:50:30 -04:00
|
|
|
#endif
|
2007-01-23 03:46:51 -05:00
|
|
|
|
2008-03-19 23:09:15 -04:00
|
|
|
/*
|
|
|
|
|
* Fixup scheduler state for proc0 and thread0
|
|
|
|
|
*/
|
2004-09-04 22:09:54 -04:00
|
|
|
void schedinit(void);
|
2006-11-11 11:27:52 -05:00
|
|
|
#endif /* _KERNEL */
|
|
|
|
|
|
|
|
|
|
/* POSIX 1003.1b Process Scheduling */
|
2004-09-19 14:34:17 -04:00
|
|
|
|
2006-11-11 11:27:52 -05:00
|
|
|
/*
|
|
|
|
|
* POSIX scheduling policies
|
|
|
|
|
*/
|
|
|
|
|
#define SCHED_FIFO 1
|
|
|
|
|
#define SCHED_OTHER 2
|
|
|
|
|
#define SCHED_RR 3
|
|
|
|
|
|
|
|
|
|
struct sched_param {
|
|
|
|
|
int sched_priority;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* POSIX scheduling declarations for userland.
|
|
|
|
|
*/
|
|
|
|
|
#ifndef _KERNEL
|
|
|
|
|
#include <sys/cdefs.h>
|
|
|
|
|
#include <sys/_types.h>
|
|
|
|
|
|
|
|
|
|
#ifndef _PID_T_DECLARED
|
|
|
|
|
typedef __pid_t pid_t;
|
|
|
|
|
#define _PID_T_DECLARED
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
struct timespec;
|
|
|
|
|
|
|
|
|
|
__BEGIN_DECLS
|
|
|
|
|
int sched_get_priority_max(int);
|
|
|
|
|
int sched_get_priority_min(int);
|
|
|
|
|
int sched_getparam(pid_t, struct sched_param *);
|
|
|
|
|
int sched_getscheduler(pid_t);
|
|
|
|
|
int sched_rr_get_interval(pid_t, struct timespec *);
|
|
|
|
|
int sched_setparam(pid_t, const struct sched_param *);
|
|
|
|
|
int sched_setscheduler(pid_t, int, const struct sched_param *);
|
|
|
|
|
int sched_yield(void);
|
|
|
|
|
__END_DECLS
|
|
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
#endif /* !_SCHED_H_ */
|