/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;

/*
* Expects runqueue lock to be held for atomicity of update
*/
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
* Expects runqueue lock to be held for atomicity of update
*/
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
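
/*
 * Usage sketch (illustrative, not part of this header): the
 * double-underscore variants assume the caller has already tested
 * schedstat_enabled(), which avoids re-testing the static branch for
 * every update when several fields are touched together:
 *
 *	if (schedstat_enabled()) {
 *		__schedstat_inc(stats->wait_count);
 *		__schedstat_add(stats->wait_sum, delta);
 *	}
 *
 * schedstat_inc()/schedstat_add() embed that check themselves and
 * suit one-off updates. The stats/delta names above are hypothetical.
 */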

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats);
void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats);
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats);

static inline void
check_schedstat_required(void)
{
	if (schedstat_enabled())
		return;

	/* Force schedstat enabled if a dependent tracepoint is active */
	if (trace_sched_stat_wait_enabled()    ||
	    trace_sched_stat_sleep_enabled()   ||
	    trace_sched_stat_iowait_enabled()  ||
	    trace_sched_stat_blocked_enabled() ||
	    trace_sched_stat_runtime_enabled())
		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}
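
/*
 * Sketch of how the warning above is satisfied in practice (the two
 * knobs are the ones named in the message itself):
 *
 *	schedstats=enable			boot-time kernel parameter
 *	sysctl kernel.sched_schedstats=1	at runtime
 */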

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart(struct rq *rq, unsigned long long delta) { }
#define   schedstat_enabled()		0
#define __schedstat_inc(var)		do { } while (0)
#define   schedstat_inc(var)		do { } while (0)
#define __schedstat_add(var, amt)	do { } while (0)
#define   schedstat_add(var, amt)	do { } while (0)
#define __schedstat_set(var, val)	do { } while (0)
#define   schedstat_set(var, val)	do { } while (0)
#define   schedstat_val(var)		0
#define   schedstat_val_or_zero(var)	0
#define __update_stats_wait_start(rq, p, stats)		do { } while (0)
#define __update_stats_wait_end(rq, p, stats)		do { } while (0)
#define __update_stats_enqueue_sleeper(rq, p, stats)	do { } while (0)
#define check_schedstat_required()			do { } while (0)

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats {
	struct sched_entity	se;
	struct sched_statistics stats;
} __no_randomize_layout;
#endif

static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (!entity_is_task(se))
		return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
	return &task_of(se)->stats;
}
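
/*
 * Layout note (illustrative): a task's statistics live in its
 * task_struct, so task_of(se)->stats suffices. A group entity has no
 * task_struct, so sched_entity_stats places the statistics right
 * after the embedded entity and container_of() recovers them from
 * the se pointer alone:
 *
 *	struct sched_entity_stats *ses =
 *		container_of(se, struct sched_entity_stats, se);
 *	schedstat_inc(ses->stats.wait_count);	// hypothetical update
 */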

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (p->in_memstall)
		set |= TSK_MEMSTALL_RUNNING;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}
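
/*
 * Outcome sketch (illustrative): a plain wakeup of an iowaiting task
 * clears TSK_IOWAIT and sets TSK_RUNNING, while a requeue (!wakeup,
 * or a migration flagged via sched_psi_wake_requeue) must also
 * re-set TSK_MEMSTALL for a stalled task, because that state was
 * deregistered from the old runqueue by psi_ttwu_dequeue().
 */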

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	psi_task_change(p, clear, 0);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}

static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) { }
static inline void psi_dequeue(struct task_struct *p, bool sleep) { }
static inline void psi_ttwu_dequeue(struct task_struct *p) { }
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) { }
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs: the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = 0;

	if (!t->sched_info.last_queued)
		return;

	delta = rq_clock(rq) - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeue(rq, delta);
}
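
/*
 * Worked example (made-up numbers): a task queued at rq_clock() ==
 * 1000ns and dequeued, still never scheduled, at 4000ns consumes and
 * clears last_queued, charging 3000ns of run_delay to both the task
 * and the runqueue. Both timestamps come from the same rq's clock,
 * so any absolute skew between CPUs cancels out of the delta.
 */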

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now, delta = 0;

	if (!t->sched_info.last_queued)
		return;

	now = rq_clock(rq);
	delta = now - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_enqueue() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (task_is_running(t))
		sched_info_enqueue(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
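
/*
 * Call-path sketch (simplified; assumes the core scheduler invokes
 * this from the context-switch path with the rq lock held):
 *
 *	sched_info_switch(rq, prev, next);
 *		sched_info_depart(rq, prev);	// charge prev's on-CPU time
 *		sched_info_arrive(rq, next);	// charge next's wait time
 */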

#else /* !CONFIG_SCHED_INFO: */
#define sched_info_enqueue(rq, t)	do { } while (0)
#define sched_info_dequeue(rq, t)	do { } while (0)
#define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */