This updates the proxy-exec logic in android16-6.12 which was added at v14, to be synced with the v18 series of the patchset. v14 series: https://github.com/johnstultz-work/linux-dev/commits/proxy-exec-v14-6.12 v18 series: https://github.com/johnstultz-work/linux-dev/commits/proxy-exec-v18-6.12 Changes since v14: * Improved naming consistency and using the guard macro where appropriate * Improved comments * Build fixes for !CONFIG_SMP * Fixes for when sched_proxy_exec() is disabled * Renamed update_curr_se to update_se_times, as suggested by Steven Rostedt. * Use put_prev_set_next_task as suggested by K Prateek Nayak * Try to rework find_proxy_task() locking to use guard and proxy_deactivate_task() in the way Peter suggested. * Simplified changes to enqueue_task_rt to match deadline's logic, as pointed out by Peter * Get rid of preserve_need_resched flag and rework per Peter's suggestion * Rework find_proxy_task() to use guard to cleanup the exit gotos as Peter suggested. * Reworked the forced return-migration from find_proxy_task to use Peter’s dequeue+wakeup approach, which helps resolve the cpuhotplug issues I had also seen, caused by the manual return migration sending tasks to offline cpus. * A number of improvements to the commit messages and comments suggested by Juri Lelli and Peter Zijlstra * Added missing logic to put_prev_task_dl as pointed out by K Prateek Nayak * Add lockdep_assert_held_once and drop the READ_ONCE in __get_task_blocked_on(), as suggested by Juri Lelli * Moved update_curr_task logic into update_curr_se to simplify things * Renamed update_se_times to update_se, as suggested by Peter * Reworked logic to fix an issue Peter pointed out with thread group accounting being done on the donor, rather than the running execution context. * Fixed typos caught by Metin Kaya * Suleiman Souhlal noticed an inefficiency in that we evaluate if the lock owner’s task_cpu() is the current cpu, before we look to see if the lock owner is on_rq at all. 
With v17 this would result in us proxy-migrating a donor to a remote cpu, only to then realize the task wasn’t even on the runqueue, and doing the sleeping owner enqueuing. Suleiman suggested instead that we evaluate on_rq first, so we can immediately do sleeping owner enqueueing. Then only if the owner is on a runqueue do we proxy-migrate the donor (which requires the more costly lock juggling). While not a huge logical change, it did uncover other problems, which needed to be resolved. * One issue found was there was a race where if do_activate_blocked_waiter() from the sleeping owner wakeup was delayed and the task had already been woken up elsewhere. It’s possible if that task was running and called into schedule() to be blocked, it would be dequeued from the runqueue, but before we switched to the new task, do_activate_blocked_waiter() might try to activate it on a different cpu. Clearly the do_activate_blocked_waiter() needed to check the task on_cpu value as well. * I found that we still can hit wakeups that end up skipping the BO_WAKING -> BO_RUNNABLE transition (causing find_proxy_task() to end up spinning waiting for that transition), so I re-added the logic to handle doing return migrations from find_proxy_task() if we hit that case. * Hupu suggested a tweak in ttwu_runnable() to evaluate proxy_needs_return() slightly earlier. * Kuyo Chang reported and isolated a fix for a problem with __task_is_pushable() in the !sched_proxy_exec case, which was folded into the “sched: Fix rt/dl load balancing via chain level balance” patch * Reworked some of the logic around releasing the rq->donor reference on migrations, using rq->idle directly. * Suleiman also pointed out that some added task_struct elements were not being initialized in the init_task code path, so that was good to fix. Bug: 427820735 Change-Id: I20ce778e474124a917dbf51378dc1301535ac858 Signed-off-by: John Stultz <jstultz@google.com>
120 lines
3.1 KiB
C
120 lines
3.1 KiB
C
/*
 * Debugging code for mutexes
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * lock debugging, locking tree, deadlock detection started by:
 *
 *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Released under the General Public License (GPL).
 */
|
|
#include <linux/mutex.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/device.h>
|
|
#include <linux/export.h>
|
|
#include <linux/poison.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/spinlock.h>
|
|
#include <linux/kallsyms.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/debug_locks.h>
|
|
|
|
#include "mutex.h"
|
|
|
|
/*
|
|
* Must be called with lock->wait_lock held.
|
|
*/
|
|
void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
|
|
{
|
|
memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
|
|
waiter->magic = waiter;
|
|
INIT_LIST_HEAD(&waiter->list);
|
|
waiter->ww_ctx = MUTEX_POISON_WW_CTX;
|
|
}
|
|
|
|
/*
 * Sanity-check a waiter that is about to be woken.
 *
 * Caller must hold lock->wait_lock; the waiter must still be linked
 * on the mutex's wait list.
 */
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	lockdep_assert_held(&lock->wait_lock);
	/* Waking a waiter implies the mutex still has waiters queued. */
	DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
	/* magic was set to self in debug_mutex_lock_common(); detect corruption. */
	DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
	/* The waiter itself must still be linked into a wait list. */
	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
}
|
|
|
|
/*
 * Retire a waiter: it must already be unlinked from any wait list;
 * its memory is then poisoned to catch use-after-free.
 */
void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
	DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
	memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}
|
|
|
|
/*
 * Sanity-check before queueing @task as a waiter on @lock.
 *
 * Caller must hold lock->wait_lock.
 */
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			    struct task_struct *task)
{
	lockdep_assert_held(&lock->wait_lock);

	/* Current thread can't be already blocked (since it's executing!) */
	DEBUG_LOCKS_WARN_ON(__get_task_blocked_on(task));
}
|
|
|
|
void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
|
|
struct task_struct *task)
|
|
{
|
|
struct mutex *blocked_on = __get_task_blocked_on(task);
|
|
|
|
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
|
|
DEBUG_LOCKS_WARN_ON(waiter->task != task);
|
|
DEBUG_LOCKS_WARN_ON(blocked_on && blocked_on != lock);
|
|
|
|
INIT_LIST_HEAD(&waiter->list);
|
|
waiter->task = NULL;
|
|
}
|
|
|
|
/*
 * Validate a mutex that is being unlocked. All checks are skipped once
 * lock debugging has been disabled (debug_locks cleared by an earlier
 * warning).
 */
void debug_mutex_unlock(struct mutex *lock)
{
	if (likely(debug_locks)) {
		/* magic is stamped by debug_mutex_init(); mismatch means corruption. */
		DEBUG_LOCKS_WARN_ON(lock->magic != lock);
		/* An initialized wait-list head is never all-NULL. */
		DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
	}
}
|
|
|
|
/*
 * Initialize the debug state of a mutex: the lockdep class (when
 * CONFIG_DEBUG_LOCK_ALLOC is enabled) and the magic self-pointer used
 * later to detect corruption or use of an uninitialized mutex.
 */
void debug_mutex_init(struct mutex *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
	lock->magic = lock;
}
|
|
|
|
/* devres destructor: destroy the managed mutex when its device goes away. */
static void devm_mutex_release(void *res)
{
	struct mutex *lock = res;

	mutex_destroy(lock);
}
|
|
|
|
int __devm_mutex_init(struct device *dev, struct mutex *lock)
|
|
{
|
|
return devm_add_action_or_reset(dev, devm_mutex_release, lock);
|
|
}
|
|
EXPORT_SYMBOL_GPL(__devm_mutex_init);
|
|
|
|
/***
 * mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void mutex_destroy(struct mutex *lock)
{
	/* Destroying a locked mutex is a bug. */
	DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
	/* Clear magic so later debug checks catch any further use. */
	lock->magic = NULL;
}

EXPORT_SYMBOL_GPL(mutex_destroy);
|