ANDROID: Sync proxy-exec logic to v19 (from v18)

Just aligning android16-6.12 tree to the changes
made in proxy-exec v19.

v18 series:
https://github.com/johnstultz-work/linux-dev/commits/proxy-exec-v18-6.12

v19 series:
https://github.com/johnstultz-work/linux-dev/commits/proxy-exec-v19-6.12

Changes since v18:
* Build fixup for PREEMPT_RT suggested by K Prateek Nayak
* Slight re-arrangement of functions to clean up later patches
* Slight tweak further simplifying update_curr_common() suggested by Peter
* Rename try_to_block_task() arguments and add additional comments
  suggested by Peter
* Push proxy_resched_idle() returning rq->idle to a later patch in the
  series, as suggested by Peter.
* Build fixup for !CONFIG_SMP and CONFIG_SCHED_PROXY_EXEC
* Fixed proxy_force_return() logic for !SMP cases

Bug: 431329485
Change-Id: I3dd8ddab236f9c1b68e2495a4023f5b474265aa6
Signed-off-by: John Stultz <jstultz@google.com>
This commit is contained in:
John Stultz
2025-07-12 03:08:19 +00:00
parent 7d6f7afb0d
commit e740e8d9ea
5 changed files with 49 additions and 26 deletions

View File

@@ -2239,6 +2239,29 @@ static inline void __set_blocked_on_waking(struct task_struct *p)
p->blocked_on_state = BO_WAKING;
}
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
lockdep_assert_held_once(&p->blocked_lock);
return p->blocked_on;
}
#ifndef CONFIG_PREEMPT_RT
static inline void set_blocked_on_waking_nested(struct task_struct *p, struct mutex *m)
{
raw_spin_lock_nested(&p->blocked_lock, SINGLE_DEPTH_NESTING);
WARN_ON_ONCE(__get_task_blocked_on(p) != m);
__set_blocked_on_waking(p);
raw_spin_unlock(&p->blocked_lock);
}
#else
static inline void set_blocked_on_waking_nested(struct task_struct *p, struct rt_mutex *m)
{
raw_spin_lock_nested(&p->blocked_lock, SINGLE_DEPTH_NESTING);
__set_blocked_on_waking(p);
raw_spin_unlock(&p->blocked_lock);
}
#endif
static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
WARN_ON_ONCE(!m);
@@ -2267,12 +2290,6 @@ static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *
p->blocked_on_state = BO_RUNNABLE;
}
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
lockdep_assert_held_once(&p->blocked_lock);
return p->blocked_on;
}
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());

View File

@@ -284,16 +284,13 @@ __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
#ifndef WW_RT
debug_mutex_wake_waiter(lock, waiter);
#endif
/* nested as we should hold current->blocked_lock already */
raw_spin_lock_nested(&waiter->task->blocked_lock, SINGLE_DEPTH_NESTING);
/*
* When waking up the task to die, be sure to set the
* blocked_on_state to BO_WAKING. Otherwise we can see
* circular blocked_on relationships that can't resolve.
*/
WARN_ON_ONCE(__get_task_blocked_on(waiter->task) != lock);
__set_blocked_on_waking(waiter->task);
raw_spin_unlock(&waiter->task->blocked_lock);
/* nested as we should hold current->blocked_lock already */
set_blocked_on_waking_nested(waiter->task, lock);
wake_q_add(wake_q, waiter->task);
}
@@ -342,15 +339,12 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
* wakeup pending to re-read the wounded state.
*/
if (owner != current) {
/* nested as we should hold current->blocked_lock already */
raw_spin_lock_nested(&owner->blocked_lock, SINGLE_DEPTH_NESTING);
/*
* When waking up the task to wound, be sure to set the
* blocked_on_state to BO_WAKING. Otherwise we can see
* circular blocked_on relationships that can't resolve.
*/
__set_blocked_on_waking(owner);
raw_spin_unlock(&owner->blocked_lock);
set_blocked_on_waking_nested(owner, lock);
wake_q_add(wake_q, owner);
}
return true;

View File

@@ -5588,7 +5588,7 @@ static void zap_balance_callbacks(struct rq *rq)
}
rq->balance_callback = found ? &balance_push_callback : NULL;
}
#endif /* CONFIG_SCHED_PROXY_EXEC */
#endif
static void balance_push(struct rq *rq);
@@ -5658,11 +5658,9 @@ void balance_callbacks(struct rq *rq, struct balance_callback *head)
#else
#ifdef CONFIG_SCHED_PROXY_EXEC
static inline void zap_balance_callbacks(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_PROXY_EXEC */
static inline void __balance_callbacks(struct rq *rq)
{
@@ -7143,11 +7141,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
/*
* Helper function for __schedule()
*
* If a task does not have signals pending, deactivate it
* Otherwise marks the task's __state as RUNNING
* Tries to deactivate the task, unless the should_block arg
* is false or if a signal is pending. In the case a signal
* is pending, marks the task's __state as RUNNING (and clear
* blocked_on).
*/
static bool try_to_block_task(struct rq *rq, struct task_struct *p,
unsigned long task_state, bool deactivate_cond)
unsigned long task_state, bool should_block)
{
int flags = DEQUEUE_NOCLOCK;
@@ -7156,7 +7156,14 @@ static bool try_to_block_task(struct rq *rq, struct task_struct *p,
return false;
}
if (!deactivate_cond)
/*
* We check should_block after signal_pending because we
* will want to wake the task in that case. But if
 * should_block is false, it's likely due to the task being
* blocked on a mutex, and we want to keep it on the runqueue
* to be selectable for proxy-execution.
*/
if (!should_block)
return false;
p->sched_contributes_to_load =
@@ -7307,6 +7314,7 @@ static inline
void proxy_force_return(struct rq *rq, struct rq_flags *rf,
struct task_struct *p)
{
force_blocked_on_runnable(p);
}
static inline bool proxy_can_run_here(struct rq *rq, struct task_struct *p)
@@ -7690,6 +7698,12 @@ static void __sched notrace __schedule(int sched_mode)
goto picked;
}
} else if (!preempt && prev_state) {
/*
* We pass task_is_blocked() as the should_block arg
* in order to keep mutex-blocked tasks on the runqueue
 * for selection with proxy-exec (without proxy-exec
* task_is_blocked() will always be false).
*/
block = try_to_block_task(rq, prev, prev_state,
!task_is_blocked(prev));
switch_count = &prev->nvcsw;

View File

@@ -1270,9 +1270,7 @@ static inline bool do_preempt_short(struct cfs_rq *cfs_rq,
*/
s64 update_curr_common(struct rq *rq)
{
struct task_struct *donor = rq->donor;
return update_se(rq, &donor->se);
return update_se(rq, &rq->donor->se);
}
/*

View File

@@ -2336,7 +2336,7 @@ static inline bool task_is_blocked(struct task_struct *p)
static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
return READ_ONCE(p->on_cpu);
return p->on_cpu;
#else
return task_current(rq, p);
#endif