Merge android16-6.12 into android16-6.12-lts

This merges the android16-6.12 branch into the -lts branch, catching
it up with the latest changes in there.

Resolves merge conflicts in:
	kernel/sched/core.c

It contains the following commits:

* 2bd1f36314 ANDROID: gunyah: Add new VM status to handle reset failure
* e740e8d9ea ANDROID: Sync proxy-exec logic to v19 (from v18)
* 7d6f7afb0d UPSTREAM: perf/core: Clean up perf_try_init_event()
* 3f5de81785 ANDROID: GKI: Update xiaomi symbol list.
* 5d3d6f75d5 BACKPORT: hung_task: show the blocker task if the task is hung on mutex
* 6b4fffd5b2 FROMGIT: pinmux: fix race causing mux_owner NULL with active mux_usecount
* 9f514cf3d9 ANDROID: GKI: add GKI symbol list for Exynosauto SoC

Change-Id: I26e0399d7c0a55ff40ea1e915b41ce8320bff30b
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-07-15 07:42:24 +00:00
16 changed files with 693 additions and 435 deletions

View File

@@ -236,6 +236,15 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
if (desc->mux_usecount)
return NULL;
}
if (gpio_range) {
owner = desc->gpio_owner;
desc->gpio_owner = NULL;
} else {
owner = desc->mux_owner;
desc->mux_owner = NULL;
desc->mux_setting = NULL;
}
}
/*
@@ -247,17 +256,6 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
else if (ops->free)
ops->free(pctldev, pin);
scoped_guard(mutex, &desc->mux_lock) {
if (gpio_range) {
owner = desc->gpio_owner;
desc->gpio_owner = NULL;
} else {
owner = desc->mux_owner;
desc->mux_owner = NULL;
desc->mux_setting = NULL;
}
}
module_put(pctldev->owner);
return owner;

View File

@@ -8,6 +8,7 @@
#include <linux/gunyah.h>
#include <linux/notifier.h>
#include <linux/types.h>
#include <linux/android_kabi.h>
#define GUNYAH_VMID_INVAL U16_MAX
#define GUNYAH_MEM_HANDLE_INVAL U32_MAX
@@ -48,8 +49,10 @@ enum gunyah_rm_vm_status {
GUNYAH_RM_VM_STATUS_EXITED = 9,
GUNYAH_RM_VM_STATUS_RESETTING = 10,
GUNYAH_RM_VM_STATUS_RESET = 11,
GUNYAH_RM_VM_STATUS_RESET_FAILED = 12,
/* clang-format on */
};
ANDROID_KABI_ENUMERATOR_IGNORE(gunyah_rm_vm_status, GUNYAH_RM_VM_STATUS_RESET_FAILED);
struct gunyah_rm_vm_status_payload {
__le16 vmid;

View File

@@ -583,11 +583,19 @@ static int gunyah_vm_rm_notification_status(struct gunyah_vm *ghvm, void *data)
return NOTIFY_OK;
/* All other state transitions are synchronous to a corresponding RM call */
if (payload->vm_status == GUNYAH_RM_VM_STATUS_RESET) {
switch (payload->vm_status) {
case GUNYAH_RM_VM_STATUS_RESET_FAILED:
dev_warn(ghvm->parent, "VM: %u RESET failed with status %u\n",
ghvm->vmid, payload->vm_status);
fallthrough;
case GUNYAH_RM_VM_STATUS_RESET:
down_write(&ghvm->status_lock);
ghvm->vm_status = payload->vm_status;
up_write(&ghvm->status_lock);
wake_up(&ghvm->vm_status_wait);
break;
default:
break;
}
return NOTIFY_DONE;
@@ -1147,7 +1155,8 @@ static void _gunyah_vm_put(struct kref *kref)
/* clang-format off */
if (!ret)
wait_event(ghvm->vm_status_wait,
ghvm->vm_status == GUNYAH_RM_VM_STATUS_RESET);
(ghvm->vm_status == GUNYAH_RM_VM_STATUS_RESET) ||
(ghvm->vm_status == GUNYAH_RM_VM_STATUS_RESET_FAILED));
else
dev_err(ghvm->parent, "Failed to reset the vm: %d\n", ret);

View File

@@ -357070,6 +357070,10 @@ enumeration {
name: "GUNYAH_RM_VM_STATUS_RESET"
value: 11
}
enumerator {
name: "GUNYAH_RM_VM_STATUS_RESET_FAILED"
value: 12
}
}
}
enumeration {
@@ -378677,6 +378681,14 @@ function {
parameter_id: 0x1ad7b8dd
parameter_id: 0x01222f7d
}
function {
id: 0x1782373c
return_type_id: 0x48b5725f
parameter_id: 0x17a37c1c
parameter_id: 0xbdd18903
parameter_id: 0x18bd6530
parameter_id: 0xf435685e
}
function {
id: 0x1784717f
return_type_id: 0x48b5725f
@@ -388582,6 +388594,13 @@ function {
return_type_id: 0x18bd6530
parameter_id: 0x140c6eab
}
function {
id: 0x53d4f2c3
return_type_id: 0x18bd6530
parameter_id: 0x17a37c1c
parameter_id: 0x25e45be7
parameter_id: 0xf435685e
}
function {
id: 0x53d7cb1d
return_type_id: 0x0930c6d9
@@ -480448,6 +480467,42 @@ elf_symbol {
type_id: 0x15708e94
full_name: "pci_epc_init_notify"
}
elf_symbol {
id: 0xb862467e
name: "pci_epc_mem_alloc_addr"
is_defined: true
symbol_type: FUNCTION
crc: 0xb9fa0d06
type_id: 0x53d4f2c3
full_name: "pci_epc_mem_alloc_addr"
}
elf_symbol {
id: 0xffb38889
name: "pci_epc_mem_free_addr"
is_defined: true
symbol_type: FUNCTION
crc: 0x0f81c98a
type_id: 0x1782373c
full_name: "pci_epc_mem_free_addr"
}
elf_symbol {
id: 0x8a8d0c1a
name: "pci_epc_set_bar"
is_defined: true
symbol_type: FUNCTION
crc: 0x7dc4416e
type_id: 0x98c78004
full_name: "pci_epc_set_bar"
}
elf_symbol {
id: 0xb7b28cad
name: "pci_epc_unmap_addr"
is_defined: true
symbol_type: FUNCTION
crc: 0xba4a789a
type_id: 0x15dd5f06
full_name: "pci_epc_unmap_addr"
}
elf_symbol {
id: 0x3852868c
name: "pci_find_bus"
@@ -480628,6 +480683,15 @@ elf_symbol {
type_id: 0x5373932e
full_name: "pci_ioremap_bar"
}
elf_symbol {
id: 0x231d5ec9
name: "pci_ioremap_wc_bar"
is_defined: true
symbol_type: FUNCTION
crc: 0x45d79d6f
type_id: 0x5373932e
full_name: "pci_ioremap_wc_bar"
}
elf_symbol {
id: 0xec861eec
name: "pci_iounmap"
@@ -480745,6 +480809,15 @@ elf_symbol {
type_id: 0x99f942bc
full_name: "pci_msix_vec_count"
}
elf_symbol {
id: 0x2f7fc8d4
name: "pci_num_vf"
is_defined: true
symbol_type: FUNCTION
crc: 0x94842ef3
type_id: 0x99f942bc
full_name: "pci_num_vf"
}
elf_symbol {
id: 0x180e6903
name: "pci_pio_to_address"
@@ -514188,6 +514261,10 @@ interface {
symbol_id: 0xc80fa4a7
symbol_id: 0x39db373c
symbol_id: 0x9df0dd4c
symbol_id: 0xb862467e
symbol_id: 0xffb38889
symbol_id: 0x8a8d0c1a
symbol_id: 0xb7b28cad
symbol_id: 0x3852868c
symbol_id: 0x27f20808
symbol_id: 0x63876663
@@ -514208,6 +514285,7 @@ interface {
symbol_id: 0x9c6c58ea
symbol_id: 0x2fefe933
symbol_id: 0x1c994923
symbol_id: 0x231d5ec9
symbol_id: 0xec861eec
symbol_id: 0x1279792b
symbol_id: 0x5810bdc3
@@ -514221,6 +514299,7 @@ interface {
symbol_id: 0x9a271493
symbol_id: 0xcb617d2b
symbol_id: 0x9d3ae050
symbol_id: 0x2f7fc8d4
symbol_id: 0x180e6903
symbol_id: 0xf32782d8
symbol_id: 0x258080ab

View File

@@ -10,3 +10,6 @@ type 'struct sched_dl_entity' changed
type 'struct f2fs_sb_info' changed
member 'union { unsigned int reserved_pin_section; unsigned char __kabi_ignored0; }' was added
type 'enum gunyah_rm_vm_status' changed
enumerator 'GUNYAH_RM_VM_STATUS_RESET_FAILED' (12) was added

File diff suppressed because it is too large Load Diff

View File

@@ -112,6 +112,8 @@
__tracepoint_android_vh_alloc_pages_entry
__traceiter_android_vh_sysrq_crash
__tracepoint_android_vh_sysrq_crash
__traceiter_android_rvh_schedule_bug
__tracepoint_android_rvh_schedule_bug
#required by metis.ko module
__traceiter_android_vh_rwsem_read_wait_start

View File

@@ -202,4 +202,6 @@ DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
extern unsigned long mutex_get_owner(struct mutex *lock);
#endif /* __LINUX_MUTEX_H */

View File

@@ -1271,6 +1271,10 @@ struct task_struct {
#endif
raw_spinlock_t blocked_lock;
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
struct mutex *blocker_mutex;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
int non_block_count;
#endif
@@ -2235,6 +2239,29 @@ static inline void __set_blocked_on_waking(struct task_struct *p)
p->blocked_on_state = BO_WAKING;
}
/*
 * __get_task_blocked_on - return the mutex @p is currently blocked on
 * (NULL if it is not blocked on a mutex).
 *
 * Caller must hold p->blocked_lock; this is lockdep-asserted.
 */
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
lockdep_assert_held_once(&p->blocked_lock);
return p->blocked_on;
}
#ifndef CONFIG_PREEMPT_RT
/*
 * set_blocked_on_waking_nested - mark @p as waking from its mutex wait.
 *
 * Transitions p->blocked_on_state to BO_WAKING (via __set_blocked_on_waking())
 * under p->blocked_lock. The lock is taken with SINGLE_DEPTH_NESTING because
 * callers (__ww_mutex_die()/__ww_mutex_wound()) already hold their own
 * blocked_lock.
 */
static inline void set_blocked_on_waking_nested(struct task_struct *p, struct mutex *m)
{
raw_spin_lock_nested(&p->blocked_lock, SINGLE_DEPTH_NESTING);
/* Sanity check: @p must still be blocked on the mutex we expect. */
WARN_ON_ONCE(__get_task_blocked_on(p) != m);
__set_blocked_on_waking(p);
raw_spin_unlock(&p->blocked_lock);
}
#else
/*
 * set_blocked_on_waking_nested - mark @p as waking from its lock wait.
 *
 * PREEMPT_RT variant: takes an rt_mutex instead of a mutex and performs no
 * blocked_on sanity check. Nested lock class as the caller is expected to
 * hold its own blocked_lock already.
 */
static inline void set_blocked_on_waking_nested(struct task_struct *p, struct rt_mutex *m)
{
raw_spin_lock_nested(&p->blocked_lock, SINGLE_DEPTH_NESTING);
__set_blocked_on_waking(p);
raw_spin_unlock(&p->blocked_lock);
}
#endif
static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
WARN_ON_ONCE(!m);
@@ -2263,12 +2290,6 @@ static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *
p->blocked_on_state = BO_RUNNABLE;
}
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
lockdep_assert_held_once(&p->blocked_lock);
return p->blocked_on;
}
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());

View File

@@ -89,6 +89,43 @@ static struct notifier_block panic_block = {
.notifier_call = hung_task_panic,
};
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
/*
 * debug_show_blocker - report the task that likely holds the mutex @task
 * is hung waiting on, and dump that blocker's stack.
 *
 * Must run under rcu_read_lock() (asserted below) so iterating all
 * process threads is safe. mutex_get_owner() returns a value that must
 * not be dereferenced directly, so the owner is identified by comparing
 * it against each live thread's task_struct address before printing.
 */
static void debug_show_blocker(struct task_struct *task)
{
struct task_struct *g, *t;
unsigned long owner;
struct mutex *lock;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "No rcu lock held");
lock = READ_ONCE(task->blocker_mutex);
if (!lock)
return;
owner = mutex_get_owner(lock);
if (unlikely(!owner)) {
pr_err("INFO: task %s:%d is blocked on a mutex, but the owner is not found.\n",
task->comm, task->pid);
return;
}
/* Ensure the owner information is correct: only report a live thread. */
for_each_process_thread(g, t) {
if ((unsigned long)t == owner) {
pr_err("INFO: task %s:%d is blocked on a mutex likely owned by task %s:%d.\n",
task->comm, task->pid, t->comm, t->pid);
sched_show_task(t);
return;
}
}
}
#else
/* No-op stub when CONFIG_DETECT_HUNG_TASK_BLOCKER is not enabled. */
static inline void debug_show_blocker(struct task_struct *task)
{
}
#endif
static void check_hung_task(struct task_struct *t, unsigned long timeout)
{
unsigned long switch_count = t->nvcsw + t->nivcsw;
@@ -140,6 +177,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
" disables this message.\n");
sched_show_task(t);
debug_show_blocker(t);
hung_task_show_lock = true;
if (sysctl_hung_task_all_cpu_backtrace)

View File

@@ -76,6 +76,14 @@ static inline unsigned long __owner_flags(unsigned long owner)
return owner & MUTEX_FLAGS;
}
/*
 * mutex_get_owner - read the current owner of @lock as a plain integer.
 *
 * Do not use the return value as a pointer directly: the unsynchronized
 * read means the owner may release the lock or exit at any time, so the
 * value is only safe for comparison (e.g. matched against task_struct
 * addresses in debug_show_blocker()).
 */
unsigned long mutex_get_owner(struct mutex *lock)
{
unsigned long owner = atomic_long_read(&lock->owner);
return (unsigned long)__owner_task(owner);
}
/*
* Returns: __mutex_owner(lock) on failure or NULL on success.
*/
@@ -187,6 +195,10 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct list_head *list)
{
bool already_on_list = false;
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
WRITE_ONCE(current->blocker_mutex, lock);
#endif
debug_mutex_add_waiter(lock, waiter, current);
trace_android_vh_alter_mutex_list_add(lock, waiter, list, &already_on_list);
@@ -204,6 +216,9 @@ __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
__mutex_clear_flag(lock, MUTEX_FLAGS);
debug_mutex_remove_waiter(lock, waiter, current);
#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
WRITE_ONCE(current->blocker_mutex, NULL);
#endif
}
/*

View File

@@ -284,16 +284,13 @@ __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
#ifndef WW_RT
debug_mutex_wake_waiter(lock, waiter);
#endif
/* nested as we should hold current->blocked_lock already */
raw_spin_lock_nested(&waiter->task->blocked_lock, SINGLE_DEPTH_NESTING);
/*
* When waking up the task to die, be sure to set the
* blocked_on_state to BO_WAKING. Otherwise we can see
* circular blocked_on relationships that can't resolve.
*/
WARN_ON_ONCE(__get_task_blocked_on(waiter->task) != lock);
__set_blocked_on_waking(waiter->task);
raw_spin_unlock(&waiter->task->blocked_lock);
/* nested as we should hold current->blocked_lock already */
set_blocked_on_waking_nested(waiter->task, lock);
wake_q_add(wake_q, waiter->task);
}
@@ -342,15 +339,12 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
* wakeup pending to re-read the wounded state.
*/
if (owner != current) {
/* nested as we should hold current->blocked_lock already */
raw_spin_lock_nested(&owner->blocked_lock, SINGLE_DEPTH_NESTING);
/*
* When waking up the task to wound, be sure to set the
* blocked_on_state to BO_WAKING. Otherwise we can see
* circular blocked_on relationships that can't resolve.
*/
__set_blocked_on_waking(owner);
raw_spin_unlock(&owner->blocked_lock);
set_blocked_on_waking_nested(owner, lock);
wake_q_add(wake_q, owner);
}
return true;

View File

@@ -5588,7 +5588,7 @@ static void zap_balance_callbacks(struct rq *rq)
}
rq->balance_callback = found ? &balance_push_callback : NULL;
}
#endif /* CONFIG_SCHED_PROXY_EXEC */
#endif
static void balance_push(struct rq *rq);
@@ -5658,11 +5658,9 @@ void balance_callbacks(struct rq *rq, struct balance_callback *head)
#else
#ifdef CONFIG_SCHED_PROXY_EXEC
static inline void zap_balance_callbacks(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_PROXY_EXEC */
static inline void __balance_callbacks(struct rq *rq)
{
@@ -7143,11 +7141,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
/*
* Helper function for __schedule()
*
* If a task does not have signals pending, deactivate it
* Otherwise marks the task's __state as RUNNING
* Tries to deactivate the task, unless the should_block arg
* is false or if a signal is pending. In the case a signal
is pending, marks the task's __state as RUNNING (and clears
* blocked_on).
*/
static bool try_to_block_task(struct rq *rq, struct task_struct *p,
unsigned long *task_state_p, bool deactivate_cond)
unsigned long *task_state_p, bool should_block)
{
unsigned long task_state = *task_state_p;
int flags = DEQUEUE_NOCLOCK;
@@ -7158,7 +7158,14 @@ static bool try_to_block_task(struct rq *rq, struct task_struct *p,
return false;
}
if (!deactivate_cond)
/*
* We check should_block after signal_pending because we
* will want to wake the task in that case. But if
* should_block is false, it's likely due to the task being
* blocked on a mutex, and we want to keep it on the runqueue
* to be selectable for proxy-execution.
*/
if (!should_block)
return false;
p->sched_contributes_to_load =
@@ -7309,6 +7316,7 @@ static inline
void proxy_force_return(struct rq *rq, struct rq_flags *rf,
struct task_struct *p)
{
force_blocked_on_runnable(p);
}
static inline bool proxy_can_run_here(struct rq *rq, struct task_struct *p)
@@ -7692,7 +7700,14 @@ static void __sched notrace __schedule(int sched_mode)
goto picked;
}
} else if (!preempt && prev_state) {
block = try_to_block_task(rq, prev, &prev_state, !task_is_blocked(prev));
/*
* We pass task_is_blocked() as the should_block arg
* in order to keep mutex-blocked tasks on the runqueue
* for selection with proxy-exec (without proxy-exec
* task_is_blocked() will always be false).
*/
block = try_to_block_task(rq, prev, &prev_state,
!task_is_blocked(prev));
switch_count = &prev->nvcsw;
}

View File

@@ -1270,9 +1270,7 @@ static inline bool do_preempt_short(struct cfs_rq *cfs_rq,
*/
s64 update_curr_common(struct rq *rq)
{
struct task_struct *donor = rq->donor;
return update_se(rq, &donor->se);
return update_se(rq, &rq->donor->se);
}
/*

View File

@@ -2336,7 +2336,7 @@ static inline bool task_is_blocked(struct task_struct *p)
static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
return READ_ONCE(p->on_cpu);
return p->on_cpu;
#else
return task_current(rq, p);
#endif

View File

@@ -1266,6 +1266,17 @@ config BOOTPARAM_HUNG_TASK_PANIC
Say N if unsure.
config DETECT_HUNG_TASK_BLOCKER
bool "Dump Hung Tasks Blocker"
depends on DETECT_HUNG_TASK
depends on !PREEMPT_RT
default n
help
Say Y here to show the stacktrace of the task that holds the
mutex lock on which "hung tasks" are waiting.
This adds a small overhead but shows the suspicious task and
its call trace when a hang is caused by waiting on a mutex.
config WQ_WATCHDOG
bool "Detect Workqueue Stalls"
depends on DEBUG_KERNEL