From dc7c02e14314a58593e0e48acd54fb7ada2ac552 Mon Sep 17 00:00:00 2001
From: Rick Yiu
Date: Fri, 6 Jun 2025 05:28:50 +0000
Subject: [PATCH] ANDROID: Export symbols for vendor hooks

Add symbols used by vendor hooks which are either missing from
previous kernel or newly added for the new kernel.

Bug: 417372724
Change-Id: I344fa69ecfaf06d677bab4ad2adca8d9b5af26e8
Signed-off-by: Rick Yiu
---
 kernel/cgroup/cpuset.c | 1 +
 kernel/sched/core.c    | 3 +++
 kernel/sched/fair.c    | 1 +
 kernel/sched/pelt.c    | 3 ++-
 4 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index eb8aef0d4c8d..e57d77fe629d 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -3989,6 +3989,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	rcu_read_unlock();
 	spin_unlock_irqrestore(&callback_lock, flags);
 }
+EXPORT_SYMBOL_GPL(cpuset_cpus_allowed);
 
 /**
  * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2710eac23ee1..a17a5b0060d9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2746,6 +2746,7 @@ out_unlock:
 	put_task_struct(p);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(push_cpu_stop);
 
 /*
  * sched_class::set_cpus_allowed must do the below, but is not required to
@@ -11128,6 +11129,7 @@ const int sched_prio_to_weight[40] = {
 /* 10 */       110,        87,        70,        56,        45,
 /* 15 */        36,        29,        23,        18,        15,
 };
+EXPORT_SYMBOL_GPL(sched_prio_to_weight);
 
 /*
  * Inverse (2^32/x) values of the sched_prio_to_weight[] array, pre-calculated.
@@ -11146,6 +11148,7 @@ const u32 sched_prio_to_wmult[40] = {
 /* 10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
+EXPORT_SYMBOL_GPL(sched_prio_to_wmult);
 
 void call_trace_sched_update_nr_running(struct rq *rq, int count)
 {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a92dbaebefc2..af2257e7332c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -94,6 +94,7 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
  * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
 unsigned int sysctl_sched_base_slice = 750000ULL;
+EXPORT_SYMBOL_GPL(sysctl_sched_base_slice);
 
 static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
 
 /*
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 8dd80827b67c..037a7962e5b0 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -176,7 +176,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
  * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *          = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int
+__always_inline int
 ___update_load_sum(u64 now, struct sched_avg *sa,
 		  unsigned long load, unsigned long runnable, int running)
 {
@@ -228,6 +228,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 	return 1;
 }
+EXPORT_SYMBOL_GPL(___update_load_sum);
 
 /*
  * When syncing *_avg with *_sum, we must take into account the current