Merge android16-6.12 into android16-6.12-lts

This merges the android16-6.12 branch into the -lts branch, catching
it up with the latest changes in there.

It contains the following commits:

* 21ed84930c UPSTREAM: Revert "usb: xhci: Implement xhci_handshake_check_state() helper"
* 5b3ae3bcbe BACKPORT: usb: xhci: Skip xhci_reset in xhci_resume if xhci is being removed
* 5c72e9faba ANDROID: rust_binder: adjust errors from death notifications
* 9e02edea7f ANDROID: rust_binder: use u64 for death cookie
* 4317f0aeff ANDROID: f2fs: fixup ABI break due to reserved_pin_section
* 25bdb4a624 Revert "ANDROID: ABI: update symbol list for honor"
* a76eb2b67b ANDROID: GKI: Update oplus symbol list
* 6222007a04 ANDROID: mm/readahead: add for bypass high order allocation
* 659d7bb454 ANDROID: ABI: Update symbol list for exynos
* 26937a37f5 ANDROID: MODVERSIONS: hide type definition in drivers/usb/core/driver.c
* 8760b6e4f5 ANDROID: usb: Add vendor hook for usb suspend and resume
* da662aecc8 FROMLIST: KVM: Avoid synchronize_srcu() in kvm_io_bus_register_dev()
* 4be05c6524 FROMLIST: KVM: arm64: vgic: Explicitly implement vgic_dist::ready ordering
* d6045efc66 FROMLIST: KVM: arm64: vgic-init: Remove vgic_ready() macro
* f06dd0cd35 ANDROID: rust_binder: release threads before refs
* 5bbd30a60b ANDROID: ABI: Update pixel symbol list
* bafbebf2ab ANDROID: GKI: Update symbol list for xiaomi
* b7b130b7cc ANDROID: export folio_deactivate() for GKI purpose.
* 41f730f9c4 ANDROID: GKI: update exynos symbol list
* 766ecae19f UPSTREAM: xhci: dbctty: disable ECHO flag by default
* 8ea40f5243 ANDROID: GKI: Update xiaomi symbol list.
* 5594b4731d ANDROID: vendor_hooks: export tracepoint symbols
* 0d4cc1daff ANDROID: KVM: arm64: Don't update IOMMU under memory pressure
* 672185e575 ANDROID: iommu/iommu: Handle multi-page deferred sg mappings
* 740d42d181 ANDROID: vendor_hooks: Add vendor_hook in futex to fix the OEM scheduling priority bug
* 6eb6f346ac ANDROID: ABI: Update symbol list for mtk
* c302079179 ANDROID: vendor_hooks: Add vendor hook for GenieZone demand paging
* 5c1cddc983 ANDROID: vendor_hooks: Add vendor hook for GenieZone para-virtualization
* d893caf112 ANDROID: ashmem_rust: Add support for retrieving an ashmem area's vmfile
* 0be74214c0 ANDROID: ashmem_rust: Add support for querying the size of an ashmem region
* eb50f663c4 ANDROID: ashmem_rust: Add support for providing an ashmem region's name
* 6bdbae6ea9 ANDROID: ashmem_rust: Add is_ashmem_file()
* 0d890f867e ANDROID: ABI: update symbol list for honor
* 12727f8a4b FROMGIT: f2fs: introduce reserved_pin_section sysfs entry
* 286cd9d628 ANDROID: GKI: Update RTK STB KMI symbol list
* 7b4f7682b5 ANDROID: GKI: Update symbol list for Amlogic
* 862ce4b2c4 ANDROID: KVM: arm64: iommu: Fix power tracking
* 61184996a8 ANDROID: drivers/iommu: Fix return value in iommu_map_sg
* acad0cd51d ANDROID: ABI: update symbol list for galaxy
* 393dbad32c ANDROID: vendor_hook: add condition to call for freezing fail
* b62fe47ba2 ANDROID: fix ashmem_rust return EINVAL bug in ashmem_rust.rs
* a7e1300b95 ANDROID: Revert "cpufreq: Avoid using inconsistent policy->min and policy->max"
* 15d2fe0544 ANDROID: qcom: Update the ABI symbol list
* f6ca783ba2 UPSTREAM: scsi: ufs: qcom: Check gear against max gear in vop freq_to_gear()
* 237708e9d3 ANDROID: GKI: Update symbols list file for honor White list the vm_normal_folio_pmd
* f18e354aa9 ANDROID: mm: export vm_normal_folio_pmd to allow vendors to implement simplified smaps
* c181c478b0 ANDROID: vendor_hooks: add hook to record slab free
* d2e452e197 ANDROID: Build fixups with PROXY_EXEC v18 + !CONFIG_SMP
* 4f9e4406e4 ANDROID: Update proxy-exec logic from v14 to v18
* 3fa8dabe1a ANDROID: GKI: update asr symbols list
* 94310b3f77 ANDROID: Add the dma header to aarch64 allowlist
* 880d6538c5 UPSTREAM: usb: gadget: u_serial: Fix race condition in TTY wakeup
* b115bf2302 ANDROID: ABI: Update symbol list for mtk
* e87018c5f9 FROMGIT: sched/deadline: Fix dl_server runtime calculation formula
* e2bf362ee2 FROMGIT: sched/core: Fix migrate_swap() vs. hotplug
* 06ca12d7d2 ANDROID: GKI: update the ABI symbol list
* 55972ed83a ANDROID: Fixup init_user_ns CRC change
* 4e873ad607 ANDROID: user: Add vendor hook to user for GKI purpose
* a097cd9c30 ANDROID: export find_user() for GKI purpose.
* 85b8233f7e ANDROID: rust_binder: use euid from the task
* 969c904869 ANDROID: ashmem: rename VmAreaNew->VmaNew
* 2ab3e5f283 ANDROID: rust_binder: rename VmAreaNew->VmaNew
* 2ef75ab83a ANDROID: rust_binder: use tgid_nr_ns for getting pid
* 6a2be11026 UPSTREAM: task: rust: rework how current is accessed
* 602e2300de UPSTREAM: rust: add PidNamespace
* 12dfc1d9cb UPSTREAM: rust: miscdevice: add mmap support
* 8e67cb756f UPSTREAM: mm: rust: add VmaNew for f_ops->mmap()
* bd140ddf75 UPSTREAM: mm: rust: add mmput_async support
* 0c50773076 UPSTREAM: mm: rust: add lock_vma_under_rcu
* 0b5465bb31 UPSTREAM: mm: rust: add vm_insert_page
* d7f52612c5 UPSTREAM: mm: rust: add vm_area_struct methods that require read access
* f03d4f7490 UPSTREAM: mm: rust: add abstraction for struct mm_struct
* 2ef6dbc73e BACKPORT: rust: miscdevice: change how f_ops vtable is constructed
* 1acd3b312f Revert "FROMLIST: mm: rust: add abstraction for struct mm_struct"
* a012c15566 Revert "FROMLIST: mm: rust: add vm_area_struct methods that require read access"
* 3be00a9bf8 Revert "FROMLIST: mm: rust: add vm_insert_page"
* 3aed88205e Revert "FROMLIST: mm: rust: add lock_vma_under_rcu"
* a121b6e72f Revert "FROMLIST: mm: rust: add mmput_async support"
* 9248564a81 Revert "FROMLIST: mm: rust: add VmAreaNew for f_ops->mmap()"
* 6de3ace5b5 Revert "FROMLIST: rust: miscdevice: add mmap support"
* b7f54dd23b Revert "BACKPORT: FROMLIST: task: rust: rework how current is accessed"
* 5913c80b22 ANDROID: iommu/arm-smmu-v3-kvm: Fix idmap free_leaf
* c40c54e669 UPSTREAM: erofs: impersonate the opener's credentials when accessing backing file
* 4d0200d0a9 BACKPORT: erofs: add 'fsoffset' mount option to specify filesystem offset
* 399deda7b5 ANDROID: scsi: ufs: add UFSHCD_ANDROID_QUIRK_NO_IS_READ_ON_H8
* f6b1ab83f6 ANDROID: rust_binder: remove binder_logs/procs/pid immediately
* dd35623c83 ANDROID: ABI: update symbol list for mtktv
* 58beebb30f FROMLIST: fuse: give wakeup hints to the scheduler
* 0f917e4066 ANDROID: virt: gunyah: Replace arm_smccc_1_1_smc with arm_smccc_1_1_invoke
* 33429dd323 UPSTREAM: posix-cpu-timers: fix race between handle_posix_cpu_timers() and posix_cpu_timer_del()
* 6483832947 ANDROID: GKI: Update symbol list file for xiaomi
* 668635cd34 UPSTREAM: usb: gadget: uvc: dont call usb_composite_setup_continue when not streaming

Change-Id: I64074144d1a6da9fdd3b4dd5f8314ccea4f9d9e8
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-07-13 12:16:44 +00:00
99 changed files with 58457 additions and 999 deletions

View File

@@ -183,6 +183,7 @@ filegroup(
# keep sorted
"gki/aarch64/symbols/allwinner",
"gki/aarch64/symbols/amlogic",
"gki/aarch64/symbols/asr",
"gki/aarch64/symbols/bcmstb",
"gki/aarch64/symbols/db845c",
"gki/aarch64/symbols/desktop",
@@ -1656,6 +1657,7 @@ ddk_headers(
ddk_headers(
name = "all_headers_allowlist_aarch64",
hdrs = [
"drivers/dma/dmaengine.h",
"drivers/extcon/extcon.h",
"drivers/pci/controller/dwc/pcie-designware.h",
"drivers/pci/pci.h",
@@ -1681,6 +1683,7 @@ ddk_headers(
linux_includes = [
"arch/arm64/include",
"arch/arm64/include/uapi",
"drivers/dma",
"drivers/extcon",
"drivers/pci",
"drivers/pci/controller/dwc",

View File

@@ -859,3 +859,12 @@ Description: This is a read-only entry to show the value of sb.s_encoding_flags,
SB_ENC_STRICT_MODE_FL 0x00000001
SB_ENC_NO_COMPAT_FALLBACK_FL 0x00000002
============================ ==========
What: /sys/fs/f2fs/<disk>/reserved_pin_section
Date: June 2025
Contact: "Chao Yu" <chao@kernel.org>
Description:	This threshold is used to control triggering garbage collection while
		fallocating on a pinned file, so that it can guarantee there are enough
		free reserved sections before preallocating on a pinned file.
		By default, the value is ovp_sections; specifically, for zoned UFS, the
		value is 1.

View File

@@ -128,6 +128,7 @@ device=%s Specify a path to an extra device to be used together.
fsid=%s Specify a filesystem image ID for Fscache back-end.
domain_id=%s Specify a domain ID in fscache mode so that different images
with the same blobs under a given domain ID can share storage.
fsoffset=%llu Specify block-aligned filesystem offset for the primary device.
=================== =========================================================
Sysfs Entries

View File

@@ -1747,14 +1747,22 @@ struct kvm_iommu_sg {
unsigned int pgcount;
};
#define kvm_iommu_sg_nents_size(n) (PAGE_ALIGN((n) * sizeof(struct kvm_iommu_sg)))
static inline unsigned int kvm_iommu_sg_nents_round(unsigned int nents)
{
return kvm_iommu_sg_nents_size(nents) / sizeof(struct kvm_iommu_sg);
}
static inline struct kvm_iommu_sg *kvm_iommu_sg_alloc(unsigned int nents, gfp_t gfp)
{
return alloc_pages_exact(PAGE_ALIGN(nents * sizeof(struct kvm_iommu_sg)), gfp);
return alloc_pages_exact(kvm_iommu_sg_nents_size(nents), gfp);
}
static inline void kvm_iommu_sg_free(struct kvm_iommu_sg *sg, unsigned int nents)
{
free_pages_exact(sg, PAGE_ALIGN(nents * sizeof(struct kvm_iommu_sg)));
free_pages_exact(sg, kvm_iommu_sg_nents_size(nents));
}

View File

@@ -710,7 +710,6 @@ static int iommu_power_off(struct kvm_power_domain *pd)
int ret;
kvm_iommu_lock(iommu);
iommu->power_is_off = true;
ret = kvm_iommu_ops->suspend ? kvm_iommu_ops->suspend(iommu) : 0;
if (!ret)
iommu->power_is_off = true;

View File

@@ -490,18 +490,9 @@ int __pkvm_prot_finalize(void)
int host_stage2_unmap_reg_locked(phys_addr_t start, u64 size)
{
int ret;
hyp_assert_lock_held(&host_mmu.lock);
ret = kvm_pgtable_stage2_reclaim_leaves(&host_mmu.pgt, start, size);
if (ret)
return ret;
kvm_iommu_host_stage2_idmap(start, start + size, 0);
kvm_iommu_host_stage2_idmap_complete(false);
return 0;
return kvm_pgtable_stage2_reclaim_leaves(&host_mmu.pgt, start, size);
}
static int host_stage2_unmap_unmoveable_regs(void)

View File

@@ -513,7 +513,6 @@ int vgic_lazy_init(struct kvm *kvm)
* Also map the virtual CPU interface into the VM.
* v2 calls vgic_init() if not already done.
* v3 and derivatives return an error if the VGIC is not initialized.
* vgic_ready() returns true if this function has succeeded.
*/
int kvm_vgic_map_resources(struct kvm *kvm)
{
@@ -522,12 +521,12 @@ int kvm_vgic_map_resources(struct kvm *kvm)
gpa_t dist_base;
int ret = 0;
if (likely(vgic_ready(kvm)))
if (likely(smp_load_acquire(&dist->ready)))
return 0;
mutex_lock(&kvm->slots_lock);
mutex_lock(&kvm->arch.config_lock);
if (vgic_ready(kvm))
if (dist->ready)
goto out;
if (!irqchip_in_kernel(kvm))
@@ -553,14 +552,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
goto out_slots;
}
/*
* kvm_io_bus_register_dev() guarantees all readers see the new MMIO
* registration before returning through synchronize_srcu(), which also
* implies a full memory barrier. As such, marking the distributor as
* 'ready' here is guaranteed to be ordered after all vCPUs having seen
* a completely configured distributor.
*/
dist->ready = true;
smp_store_release(&dist->ready, true);
goto out_slots;
out:
mutex_unlock(&kvm->arch.config_lock);

View File

@@ -723,7 +723,7 @@ impl Node {
if len == inner.freeze_list.len() {
pr_warn!(
"Could not remove freeze listener for {}\n",
p.task.pid_in_current_ns()
p.pid_in_current_ns()
);
}
if inner.freeze_list.is_empty() {
@@ -959,7 +959,7 @@ struct NodeDeathInner {
pub(crate) struct NodeDeath {
node: DArc<Node>,
process: Arc<Process>,
pub(crate) cookie: usize,
pub(crate) cookie: u64,
#[pin]
links_track: AtomicTracker<0>,
/// Used by the owner `Node` to store a list of registered death notifications.
@@ -988,7 +988,7 @@ impl NodeDeath {
pub(crate) fn new(
node: DArc<Node>,
process: Arc<Process>,
cookie: usize,
cookie: u64,
) -> impl PinInit<DTRWrap<Self>> {
DTRWrap::new(pin_init!(
Self {

View File

@@ -282,7 +282,7 @@ impl ShrinkablePageRange {
}
/// Register a vma with this page range. Returns the size of the region.
pub(crate) fn register_with_vma(&self, vma: &virt::VmAreaNew) -> Result<usize> {
pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
let num_bytes = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
let num_pages = num_bytes >> PAGE_SHIFT;

View File

@@ -277,7 +277,7 @@ impl ProcessInner {
/// Finds a delivered death notification with the given cookie, removes it from the thread's
/// delivered list, and returns it.
fn pull_delivered_death(&mut self, cookie: usize) -> Option<DArc<NodeDeath>> {
fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
let mut cursor = self.delivered_deaths.cursor_front();
while let Some(next) = cursor.peek_next() {
if next.cookie == cookie {
@@ -511,9 +511,13 @@ impl Process {
Ok(process)
}
pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
self.task.tgid_nr_ns(None)
}
#[inline(never)]
pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
seq_print!(m, "proc {}\n", self.task.pid_in_current_ns());
seq_print!(m, "proc {}\n", self.pid_in_current_ns());
seq_print!(m, "context {}\n", &*ctx.name);
let inner = self.inner.lock();
@@ -561,7 +565,7 @@ impl Process {
#[inline(never)]
pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
seq_print!(m, "proc {}\n", self.task.pid_in_current_ns());
seq_print!(m, "proc {}\n", self.pid_in_current_ns());
seq_print!(m, "context {}\n", &*ctx.name);
let mut all_threads = KVec::new();
@@ -1065,7 +1069,7 @@ impl Process {
}
}
fn create_mapping(&self, vma: &mm::virt::VmAreaNew) -> Result {
fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
use kernel::page::PAGE_SIZE;
let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
let mapping = Mapping::new(vma.start(), size);
@@ -1172,11 +1176,7 @@ impl Process {
thread: &Thread,
) -> Result {
let handle: u32 = reader.read()?;
let cookie: usize = reader.read()?;
// TODO: First two should result in error, but not the others.
// TODO: Do we care about the context manager dying?
let cookie: u64 = reader.read()?;
// Queue BR_ERROR if we can't allocate memory for the death notification.
let death = UniqueArc::new_uninit(GFP_KERNEL).map_err(|err| {
@@ -1184,10 +1184,14 @@ impl Process {
err
})?;
let mut refs = self.node_refs.lock();
let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;
let Some(info) = refs.by_handle.get_mut(&handle) else {
pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
return Ok(());
};
// Nothing to do if there is already a death notification request for this handle.
if info.death().is_some() {
pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
return Ok(());
}
@@ -1220,15 +1224,22 @@ impl Process {
pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
let handle: u32 = reader.read()?;
let cookie: usize = reader.read()?;
let cookie: u64 = reader.read()?;
let mut refs = self.node_refs.lock();
let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;
let Some(info) = refs.by_handle.get_mut(&handle) else {
pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
return Ok(());
};
let death = info.death().take().ok_or(EINVAL)?;
let Some(death) = info.death().take() else {
pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
return Ok(());
};
if death.cookie != cookie {
*info.death() = Some(death);
return Err(EINVAL);
pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
return Ok(());
}
// Update state and determine if we need to queue a work item. We only need to do it when
@@ -1242,7 +1253,7 @@ impl Process {
Ok(())
}
pub(crate) fn dead_binder_done(&self, cookie: usize, thread: &Thread) {
pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
death.set_notification_done(thread);
}
@@ -1299,6 +1310,13 @@ impl Process {
work.into_arc().cancel();
}
// Take all threads and release them.
let threads = take(&mut self.inner.lock().threads);
for thread in threads.values() {
thread.release();
}
drop(threads);
// Free any resources kept alive by allocated buffers.
let omapping = self.inner.lock().mapping.take();
if let Some(mut mapping) = omapping {
@@ -1340,18 +1358,8 @@ impl Process {
}
drop(freeze_listeners);
// Do similar dance for the state lock.
let mut inner = self.inner.lock();
let threads = take(&mut inner.threads);
let nodes = take(&mut inner.nodes);
drop(inner);
// Release all threads.
for thread in threads.values() {
thread.release();
}
// Deliver death notifications.
let nodes = take(&mut self.inner.lock().nodes);
for node in nodes.values() {
loop {
let death = {
@@ -1552,11 +1560,13 @@ impl Process {
}
pub(crate) fn release(this: Arc<Process>, _file: &File) {
let binderfs_file;
let should_schedule;
{
let mut inner = this.inner.lock();
should_schedule = inner.defer_work == 0;
inner.defer_work |= PROC_DEFER_RELEASE;
binderfs_file = inner.binderfs_file.take();
}
if should_schedule {
@@ -1564,6 +1574,8 @@ impl Process {
// scheduled for execution.
let _ = workqueue::system().enqueue(this);
}
drop(binderfs_file);
}
pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
@@ -1614,7 +1626,7 @@ impl Process {
pub(crate) fn mmap(
this: ArcBorrow<'_, Process>,
_file: &File,
vma: &mm::virt::VmAreaNew,
vma: &mm::virt::VmaNew,
) -> Result {
// We don't allow mmap to be used in a different process.
if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {

View File

@@ -455,7 +455,7 @@ unsafe extern "C" fn rust_binder_mmap(
// SAFETY: We previously set `private_data` in `rust_binder_open`.
let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
// SAFETY: The caller ensures that the vma is valid.
let area = unsafe { kernel::mm::virt::VmAreaNew::from_raw(vma) };
let area = unsafe { kernel::mm::virt::VmaNew::from_raw(vma) };
// SAFETY: The caller ensures that the file is valid.
match Process::mmap(f, unsafe { File::from_raw_file(file) }, area) {
Ok(()) => 0,

View File

@@ -1299,7 +1299,7 @@ impl Thread {
pr_warn!(
"Transaction failed: {:?} my_pid:{}",
err,
self.process.task.pid_in_current_ns()
self.process.pid_in_current_ns()
);
}
@@ -1584,7 +1584,7 @@ impl Thread {
pr_warn!(
"Write failure {:?} in pid:{}",
err,
self.process.task.pid_in_current_ns()
self.process.pid_in_current_ns()
);
req.read_consumed = 0;
writer.write(&req)?;
@@ -1601,7 +1601,7 @@ impl Thread {
pr_warn!(
"Read failure {:?} in pid:{}",
ret,
self.process.task.pid_in_current_ns()
self.process.pid_in_current_ns()
);
}
}

View File

@@ -122,7 +122,7 @@ impl Transaction {
debug_id,
target_node: Some(target_node),
from_parent,
sender_euid: from.process.cred.euid(),
sender_euid: from.process.task.euid(),
from: from.clone(),
to,
code: trd.code,
@@ -429,7 +429,7 @@ impl DeliverToRead for Transaction {
tr.sender_pid = 0;
if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
// Not a reply and not one-way.
tr.sender_pid = self.from.process.task.pid_in_current_ns();
tr.sender_pid = self.from.process.pid_in_current_ns();
}
let code = if self.target_node.is_none() {
BR_REPLY

View File

@@ -74,6 +74,9 @@
#include <trace/hooks/fuse.h>
#include <trace/hooks/ogki_honor.h>
#include <trace/hooks/suspend.h>
#include <trace/hooks/user.h>
#include <trace/hooks/gzvm.h>
#include <trace/hooks/usb.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@@ -458,6 +461,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_map_pages);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_alloc_pages_gfp);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_cache_readahead_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_cache_readahead_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_cache_ra_order_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_fault_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_fault_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dma_heap_buffer_alloc_start);
@@ -572,3 +576,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_charge);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_add_folio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gzvm_vcpu_exit_reason);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gzvm_handle_demand_page_pre);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gzvm_handle_demand_page_post);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gzvm_destroy_vm_post_process);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_usb_dev_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_usb_dev_resume);

View File

@@ -543,6 +543,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int idx;
unsigned int old_target_freq = target_freq;
target_freq = clamp_val(target_freq, policy->min, policy->max);
trace_android_vh_cpufreq_resolve_freq(policy, &target_freq, old_target_freq);
if (!policy->freq_table)
@@ -568,22 +569,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
unsigned int min = READ_ONCE(policy->min);
unsigned int max = READ_ONCE(policy->max);
/*
* If this function runs in parallel with cpufreq_set_policy(), it may
* read policy->min before the update and policy->max after the update
* or the other way around, so there is no ordering guarantee.
*
* Resolve this by always honoring the max (in case it comes from
* thermal throttling or similar).
*/
if (unlikely(min > max))
min = max;
return __resolve_freq(policy, clamp_val(target_freq, min, max),
CPUFREQ_RELATION_LE);
return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -2357,7 +2343,6 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
target_freq = clamp_val(target_freq, policy->min, policy->max);
target_freq = __resolve_freq(policy, target_freq, relation);
trace_android_vh_cpufreq_target(policy, &target_freq, old_target_freq);
@@ -2684,15 +2669,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* Resolve policy min/max to available frequencies. It ensures
* no frequency resolution will neither overshoot the requested maximum
* nor undershoot the requested minimum.
*
* Avoid storing intermediate values in policy->max or policy->min and
* compiler optimizations around them because they may be accessed
* concurrently by cpufreq_driver_resolve_freq() during the update.
*/
WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H));
new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L);
WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
policy->min = new_data.min;
policy->max = new_data.max;
policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
trace_cpu_frequency_limits(policy);
cpufreq_update_pressure(policy);

View File

@@ -411,6 +411,7 @@ struct kvm_arm_smmu_map_sg {
int prot;
gfp_t gfp;
unsigned int nents;
size_t total_mapped;
};
static struct iommu_map_cookie_sg *kvm_arm_smmu_alloc_cookie_sg(unsigned long iova,
@@ -424,16 +425,17 @@ static struct iommu_map_cookie_sg *kvm_arm_smmu_alloc_cookie_sg(unsigned long io
if (!map_sg)
return NULL;
map_sg->sg = kvm_iommu_sg_alloc(nents, gfp);
/* Rounds nents to allocate to page aligned size. */
map_sg->nents = kvm_iommu_sg_nents_round(nents);
map_sg->sg = kvm_iommu_sg_alloc(map_sg->nents, gfp);
if (!map_sg->sg)
return NULL;
map_sg->iova = iova;
map_sg->prot = prot;
map_sg->gfp = gfp;
map_sg->nents = nents;
ret = kvm_iommu_share_hyp_sg(map_sg->sg, nents);
ret = kvm_iommu_share_hyp_sg(map_sg->sg, map_sg->nents);
if (ret) {
kvm_iommu_sg_free(map_sg->sg, nents);
kvm_iommu_sg_free(map_sg->sg, map_sg->nents);
kfree(map_sg);
return NULL;
}
@@ -447,6 +449,17 @@ static int kvm_arm_smmu_add_deferred_map_sg(struct iommu_map_cookie_sg *cookie,
struct kvm_arm_smmu_map_sg *map_sg = container_of(cookie, struct kvm_arm_smmu_map_sg,
cookie);
struct kvm_iommu_sg *sg = map_sg->sg;
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(map_sg->cookie.domain);
size_t mapped;
/* Out of space, flush the list. */
if (map_sg->nents == map_sg->ptr) {
mapped = kvm_iommu_map_sg(kvm_smmu_domain->id, sg, map_sg->iova,
map_sg->ptr, map_sg->prot, map_sg->gfp);
map_sg->ptr = 0;
map_sg->iova += mapped;
map_sg->total_mapped += mapped;
}
sg[map_sg->ptr].phys = paddr;
sg[map_sg->ptr].pgsize = pgsize;
@@ -461,11 +474,10 @@ static size_t kvm_arm_smmu_consume_deferred_map_sg(struct iommu_map_cookie_sg *c
cookie);
struct kvm_iommu_sg *sg = map_sg->sg;
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(map_sg->cookie.domain);
size_t total_mapped;
total_mapped = kvm_iommu_map_sg(kvm_smmu_domain->id, sg, map_sg->iova, map_sg->ptr,
map_sg->prot, map_sg->gfp);
size_t total_mapped = map_sg->total_mapped;
total_mapped += kvm_iommu_map_sg(kvm_smmu_domain->id, sg, map_sg->iova,
map_sg->ptr, map_sg->prot, map_sg->gfp);
kvm_iommu_unshare_hyp_sg(sg, map_sg->nents);
kvm_iommu_sg_free(sg, map_sg->nents);
kfree(map_sg);

View File

@@ -928,6 +928,12 @@ static void smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
static void smmu_free_leaf(unsigned long phys, size_t granule, void *cookie)
{
struct kvm_hyp_iommu_domain *domain = cookie;
/* No tracking for idmap domain. */
if (domain->domain_id == KVM_IOMMU_DOMAIN_IDMAP_ID)
return;
WARN_ON(iommu_pkvm_unuse_dma(phys, granule));
}

View File

@@ -2640,7 +2640,7 @@ static int __iommu_add_sg(struct iommu_map_cookie_sg *cookie_sg,
struct iommu_domain *domain = cookie_sg->domain;
const struct iommu_domain_ops *ops = domain->ops;
unsigned int min_pagesz;
size_t pgsize, count;
int ret = 0;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
@@ -2661,8 +2661,22 @@ static int __iommu_add_sg(struct iommu_map_cookie_sg *cookie_sg,
iova, &paddr, size, min_pagesz);
return -EINVAL;
}
pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
return ops->add_deferred_map_sg(cookie_sg, paddr, pgsize, count);
while (size) {
size_t pgsize, count, added;
pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
ret = ops->add_deferred_map_sg(cookie_sg, paddr, pgsize, count);
if (ret)
break;
added = pgsize * count;
size -= added;
iova += added;
paddr += added;
}
return ret;
}
ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
@@ -2722,9 +2736,9 @@ next:
size_t consumed;
consumed = ops->consume_deferred_map_sg(cookie_sg);
if (consumed != mapped) {
if (WARN_ON(consumed != mapped)) {
mapped = consumed;
ret = EINVAL;
ret = -EINVAL;
goto out_err;
}
}

View File

@@ -3,3 +3,4 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ASHMEM_C) += ashmem.o
obj-$(CONFIG_ASHMEM_RUST) += ashmem_rust.o
ashmem_rust-objs += ashmem_rust.o ashmem_rust_exports.o

View File

@@ -31,4 +31,9 @@ enum {
};
#endif
bool is_ashmem_file(struct file *file);
int ashmem_area_name(struct file *file, char *name);
long ashmem_area_size(struct file *file);
struct file *ashmem_area_vmfile(struct file *file);
#endif /* _LINUX_ASHMEM_H */

View File

@@ -12,7 +12,8 @@
use core::{
pin::Pin,
sync::atomic::{AtomicBool, AtomicUsize, Ordering},
ptr::null_mut,
sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering},
};
use kernel::{
bindings::{self, ASHMEM_GET_PIN_STATUS, ASHMEM_PIN, ASHMEM_UNPIN},
@@ -22,13 +23,14 @@ use kernel::{
fs::{File, LocalFile},
ioctl::_IOC_SIZE,
miscdevice::{loff_t, IovIter, Kiocb, MiscDevice, MiscDeviceOptions, MiscDeviceRegistration},
mm::virt::{flags as vma_flags, VmAreaNew},
mm::virt::{flags as vma_flags, VmaNew},
page::{page_align, PAGE_MASK, PAGE_SIZE},
page_size_compat::__page_align,
prelude::*,
seq_file::{seq_print, SeqFile},
sync::{new_mutex, Mutex, UniqueArc},
task::Task,
types::ForeignOwnable,
uaccess::{UserSlice, UserSliceReader, UserSliceWriter},
};
@@ -71,6 +73,7 @@ fn has_cap_sys_admin() -> bool {
static NUM_PIN_IOCTLS_WAITING: AtomicUsize = AtomicUsize::new(0);
static IGNORE_UNSET_PROT_READ: AtomicBool = AtomicBool::new(false);
static IGNORE_UNSET_PROT_EXEC: AtomicBool = AtomicBool::new(false);
static ASHMEM_FOPS_PTR: AtomicPtr<bindings::file_operations> = AtomicPtr::new(null_mut());
fn shrinker_should_stop() -> bool {
NUM_PIN_IOCTLS_WAITING.load(Ordering::Relaxed) > 0
@@ -104,13 +107,20 @@ impl kernel::Module for AshmemModule {
ashmem_range::set_shrinker_enabled(true)?;
let ashmem_miscdevice_registration = KBox::pin_init(
MiscDeviceRegistration::register(MiscDeviceOptions {
name: c_str!("ashmem"),
}),
GFP_KERNEL,
)?;
let ashmem_miscdevice_ptr = ashmem_miscdevice_registration.as_raw();
// SAFETY: ashmem_miscdevice_registration is pinned and is never destroyed, so reading
// and storing the fops pointer this way should be fine.
let fops_ptr = unsafe { (*ashmem_miscdevice_ptr).fops };
ASHMEM_FOPS_PTR.store(fops_ptr.cast_mut(), Ordering::Relaxed);
Ok(Self {
_misc: KBox::pin_init(
MiscDeviceRegistration::register(MiscDeviceOptions {
name: c_str!("ashmem"),
}),
GFP_KERNEL,
)?,
_misc: ashmem_miscdevice_registration,
_toggle_unpin: AshmemToggleMisc::<AshmemToggleShrinker>::new()?,
_toggle_read: AshmemToggleMisc::<AshmemToggleRead>::new()?,
_toggle_exec: AshmemToggleMisc::<AshmemToggleExec>::new()?,
@@ -155,7 +165,7 @@ impl MiscDevice for Ashmem {
)
}
fn mmap(me: Pin<&Ashmem>, _file: &File, vma: &VmAreaNew) -> Result<()> {
fn mmap(me: Pin<&Ashmem>, _file: &File, vma: &VmaNew) -> Result<()> {
let asma = &mut *me.inner.lock();
// User needs to SET_SIZE before mapping.
@@ -258,7 +268,7 @@ impl MiscDevice for Ashmem {
me.pin_unpin(cmd, UserSlice::new(arg, size).reader())
}
bindings::ASHMEM_PURGE_ALL_CACHES => me.purge_all_caches(),
_ => Err(EINVAL),
_ => Err(ENOTTY),
}
}
@@ -638,3 +648,107 @@ fn ashmem_memfd_ioctl_inner(file: &File, cmd: u32, arg: usize) -> Result<isize>
_ => Err(EINVAL),
}
}
/// # Safety
///
/// The caller must ensure that `file` is valid for the duration of this function.
#[no_mangle]
unsafe extern "C" fn is_ashmem_file(file: *mut bindings::file) -> bool {
let ashmem_fops_ptr = ASHMEM_FOPS_PTR.load(Ordering::Relaxed);
if file.is_null() || ashmem_fops_ptr.is_null() {
return false;
}
// SAFETY: Accessing the f_op field of a non-NULL file structure is always okay.
let fops_ptr = unsafe { (*file).f_op };
fops_ptr == ashmem_fops_ptr
}
/// # Safety
///
/// The caller must ensure that `file` references a valid file for the duration of 'a.
unsafe fn get_ashmem_area<'a>(file: *mut bindings::file) -> Result<&'a Ashmem, Error> {
// SAFETY: Caller ensures that file is valid, so this should be safe.
if unsafe { is_ashmem_file(file) } {
return Err(EINVAL);
}
// SAFETY: Given that this is an ashmem file, it should be safe to access the private_data
// field containing the Ashmem struct.
let private = unsafe { (*file).private_data };
// SAFETY: Since this is an ashmem file, we know the type of the struct and can reference it
// safely.
let ashmem = unsafe { <<Ashmem as MiscDevice>::Ptr as ForeignOwnable>::borrow(private) };
Ok(ashmem.get_ref())
}
/// Copies the full (prefixed) name of the ashmem area backing `file` into `name`.
///
/// Returns 0 on success and a negative errno value on failure.
///
/// # Safety
///
/// The caller must ensure the following prior to invoking this function:
/// 1. `name` is valid for writing and at least of size ASHMEM_FULL_NAME_LEN.
/// 2. `file` is valid for the duration of this function.
#[no_mangle]
unsafe extern "C" fn ashmem_area_name(
    file: *mut bindings::file,
    name: *mut kernel::ffi::c_char,
) -> c_int {
    if name.is_null() {
        return EINVAL.to_errno() as c_int;
    }
    // SAFETY: The caller guarantees that `file` is valid for the duration of this function.
    let ashmem = match unsafe { get_ashmem_area(file) } {
        Ok(area) => area,
        Err(err) => return err.to_errno() as c_int,
    };
    let dst = name.cast::<[u8; ASHMEM_FULL_NAME_LEN]>();
    // SAFETY: The caller guarantees that `name` is writable and holds at least
    // ASHMEM_FULL_NAME_LEN bytes.
    ashmem.inner.lock().full_name(unsafe { &mut *dst });
    0
}
/// Returns the size of the ashmem area backing `file`, or 0 when `file` is not an
/// ashmem file or its size cannot be determined.
///
/// # Safety
///
/// The caller must ensure that `file` is valid for the duration of this function.
#[no_mangle]
unsafe extern "C" fn ashmem_area_size(file: *mut bindings::file) -> isize {
    // SAFETY: The caller guarantees that `file` is valid for the duration of this function.
    match unsafe { get_ashmem_area(file) } {
        // Errors from either lookup step collapse to 0, matching the C ashmem behavior.
        Ok(ashmem) => ashmem.get_size().unwrap_or(0),
        Err(_) => 0,
    }
}
/// Returns the backing shmem file of the ashmem area behind `file`, or NULL when `file`
/// is not an ashmem file or no backing file has been created yet.
///
/// # Safety
///
/// The caller must ensure that `file` is valid for the duration of this function.
///
/// If this function returns a non-NULL pointer to a file structure, the refcount for that
/// file will be incremented by 1. It is the caller's responsibility to decrement the refcount
/// when the file is no longer needed.
#[no_mangle]
unsafe extern "C" fn ashmem_area_vmfile(file: *mut bindings::file) -> *mut bindings::file {
    // SAFETY: The caller guarantees that `file` is valid for the duration of this function.
    let ashmem = match unsafe { get_ashmem_area(file) } {
        Ok(area) => area,
        Err(_) => return null_mut(),
    };
    let guard = ashmem.inner.lock();
    if let Some(shmem_file) = guard.file.as_ref() {
        let shmem_ptr = shmem_file.file().as_ptr();
        // SAFETY: `file` is valid for the duration of this function, so the backing shmem
        // file is also valid at this point; take an extra reference for the caller.
        unsafe { bindings::get_file(shmem_ptr) };
        shmem_ptr
    } else {
        null_mut()
    }
}

View File

@@ -0,0 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Symbols exported from the Ashmem Rust driver for loadable kernel modules to use.
*
* Copyright (c) 2025, Google LLC.
*/
#include <linux/export.h>
#include "ashmem.h"
/*
* List symbols that need to be exported to loadable kernel modules below. This is needed because
* the logic that exports symbols from Rust crates only considers the crates under the rust/
* directory at the root of the kernel repo. It currently does not support exporting symbols from
* other crates.
*/
EXPORT_SYMBOL_GPL(is_ashmem_file);
EXPORT_SYMBOL_GPL(ashmem_area_name);
EXPORT_SYMBOL_GPL(ashmem_area_size);
EXPORT_SYMBOL_GPL(ashmem_area_vmfile);

View File

@@ -10,7 +10,7 @@ use kernel::{
ffi::{c_int, c_ulong},
fs::file::{File, LocalFile},
miscdevice::{loff_t, IovIter},
mm::virt::{vm_flags_t, VmAreaNew},
mm::virt::{vm_flags_t, VmaNew},
prelude::*,
str::CStr,
types::ARef,
@@ -37,8 +37,8 @@ pub(crate) unsafe fn file_set_fpos(file: &LocalFile, pos: loff_t) {
unsafe { (*file.as_ptr()).f_pos = pos };
}
pub(crate) fn vma_set_anonymous(vma: &VmAreaNew) {
// SAFETY: The `VmAreaNew` type is only used when the vma is being set up, so this operation is
pub(crate) fn vma_set_anonymous(vma: &VmaNew) {
// SAFETY: The `VmaNew` type is only used when the vma is being set up, so this operation is
// safe.
unsafe { (*vma.as_ptr()).vm_ops = core::ptr::null_mut() };
}
@@ -152,13 +152,13 @@ fn set_inode_lockdep_class(vmfile: &File) {
}
}
pub(crate) fn zero_setup(vma: &VmAreaNew) -> Result<()> {
// SAFETY: The `VmAreaNew` type is only used when the vma is being set up, so we can set up the
pub(crate) fn zero_setup(vma: &VmaNew) -> Result<()> {
// SAFETY: The `VmaNew` type is only used when the vma is being set up, so we can set up the
// vma.
to_result(unsafe { bindings::shmem_zero_setup(vma.as_ptr()) })
}
pub(crate) fn set_file(vma: &VmAreaNew, file: &File) {
pub(crate) fn set_file(vma: &VmaNew, file: &File) {
let file = ARef::from(file);
// SAFETY: We're setting up the vma, so we can read the file pointer.
let old_file = unsafe { (*vma.as_ptr()).vm_file };

View File

@@ -45,6 +45,8 @@
#undef CREATE_TRACE_POINTS
#include <trace/hooks/ufshcd.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(ufshcd_command);
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
@@ -7187,6 +7189,11 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
if (enabled_intr_status)
retval |= ufshcd_sl_intr(hba, enabled_intr_status);
if (hba->android_quirks &
UFSHCD_ANDROID_QUIRK_NO_IS_READ_ON_H8 &&
intr_status & UIC_HIBERNATE_ENTER)
break;
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
}

View File

@@ -1784,7 +1784,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
u32 gear = 0;
u32 gear = UFS_HS_DONT_CHANGE;
switch (freq) {
case 403000000:
@@ -1806,10 +1806,10 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
break;
default:
dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
break;
return UFS_HS_DONT_CHANGE;
}
return gear;
return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
}
/*

View File

@@ -34,6 +34,10 @@
#include "usb.h"
#include <trace/hooks/usb.h>
#include <linux/android_kabi.h>
ANDROID_KABI_DECLONLY(trace_eval_map);
/*
* Adds a new dynamic USB device ID to this driver,
@@ -1420,11 +1424,16 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
int status = 0;
int i = 0, n = 0;
struct usb_interface *intf;
bool bypass = false;
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED)
goto done;
trace_android_rvh_usb_dev_suspend(udev, msg, &bypass);
if (bypass)
goto done;
/* Suspend all the interfaces and then udev itself */
if (udev->actconfig) {
n = udev->actconfig->desc.bNumInterfaces;
@@ -1521,11 +1530,17 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
int status = 0;
int i;
struct usb_interface *intf;
bool bypass = false;
if (udev->state == USB_STATE_NOTATTACHED) {
status = -ENODEV;
goto done;
}
trace_android_vh_usb_dev_resume(udev, msg, &bypass);
if (bypass)
goto done;
udev->can_submit = 1;
/* Resume the device */

View File

@@ -294,8 +294,8 @@ __acquires(&port->port_lock)
break;
}
if (do_tty_wake && port->port.tty)
tty_wakeup(port->port.tty);
if (do_tty_wake)
tty_port_tty_wakeup(&port->port);
return status;
}
@@ -577,7 +577,7 @@ static int gs_start_io(struct gs_port *port)
gs_start_tx(port);
/* Unblock any pending writes into our circular buffer, in case
* we didn't in gs_start_tx() */
tty_wakeup(port->port.tty);
tty_port_tty_wakeup(&port->port);
} else {
/* Free reqs only if we are still connected */
if (port->port_usb) {

View File

@@ -496,6 +496,9 @@ uvc_v4l2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
if (ret < 0)
return ret;
if (uvc->state != UVC_STATE_STREAMING)
return 0;
uvc->state = UVC_STATE_CONNECTED;
uvc_function_setup_continue(uvc, 1);
return 0;

View File

@@ -617,6 +617,7 @@ int dbc_tty_init(void)
dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
dbc_tty_driver->init_termios = tty_std_termios;
dbc_tty_driver->init_termios.c_lflag &= ~ECHO;
dbc_tty_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
dbc_tty_driver->init_termios.c_ispeed = 9600;

View File

@@ -461,9 +461,8 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
* In the future we should distinguish between -ENODEV and -ETIMEDOUT
* and try to recover a -ETIMEDOUT with a host controller reset.
*/
ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
XHCI_STATE_REMOVING);
ret = xhci_handshake(&xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
if (ret < 0) {
xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
xhci_halt(xhci);

View File

@@ -83,29 +83,6 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
return ret;
}
/*
* xhci_handshake_check_state - same as xhci_handshake but takes an additional
* exit_state parameter, and bails out with an error immediately when xhc_state
* has exit_state flag set.
*/
int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
u32 mask, u32 done, int usec, unsigned int exit_state)
{
u32 result;
int ret;
ret = readl_poll_timeout_atomic(ptr, result,
(result & mask) == done ||
result == U32_MAX ||
xhci->xhc_state & exit_state,
1, usec);
if (result == U32_MAX || xhci->xhc_state & exit_state)
return -ENODEV;
return ret;
}
/*
* Disable interrupts and begin the xHCI halting process.
*/
@@ -226,8 +203,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
if (xhci->quirks & XHCI_INTEL_HOST)
udelay(1000);
ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
if (ret)
return ret;
@@ -1091,7 +1067,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
xhci_zero_64b_regs(xhci);
retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
if (xhci->xhc_state & XHCI_STATE_REMOVING)
retval = -ENODEV;
else
retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
spin_unlock_irq(&xhci->lock);
if (retval)
return retval;

View File

@@ -1875,8 +1875,6 @@ void xhci_skip_sec_intr_events(struct xhci_hcd *xhci,
/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
u32 mask, u32 done, int usec, unsigned int exit_state);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_start(struct xhci_hcd *xhci);

View File

@@ -4,6 +4,7 @@
*/
#include <linux/soc/mediatek/gzvm_drv.h>
#include <trace/hooks/gzvm.h>
static int cmp_ppages(struct rb_node *node, const struct rb_node *parent)
{
@@ -160,10 +161,14 @@ static int handle_single_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
if (unlikely(ret))
return -EFAULT;
trace_android_vh_gzvm_handle_demand_page_pre(vm, memslot_id, pfn, gfn, 1);
ret = gzvm_arch_map_guest(vm->vm_id, memslot_id, pfn, gfn, 1);
if (unlikely(ret))
return -EFAULT;
trace_android_vh_gzvm_handle_demand_page_post(vm, memslot_id, pfn, gfn, 1);
return ret;
}
@@ -207,12 +212,16 @@ static int handle_block_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
vm->demand_page_buffer[i] = pfn;
}
trace_android_vh_gzvm_handle_demand_page_pre(vm, memslot_id, 0, gfn, nr_entries);
ret = gzvm_arch_map_guest_block(vm->vm_id, memslot_id,
start_gfn, nr_entries);
if (unlikely(ret)) {
ret = -EFAULT;
goto err_unlock;
}
trace_android_vh_gzvm_handle_demand_page_post(vm, memslot_id, 0, gfn, nr_entries);
}
err_unlock:
mutex_unlock(&vm->demand_paging_lock);

View File

@@ -9,9 +9,9 @@
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <trace/events/geniezone.h>
#include <linux/soc/mediatek/gzvm_drv.h>
#include <trace/events/geniezone.h>
#include <trace/hooks/gzvm.h>
/* maximum size needed for holding an integer */
#define ITOA_MAX_LEN 12
@@ -193,6 +193,7 @@ static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void __user *argp)
pr_err("vcpu unknown exit\n");
need_userspace = true;
}
trace_android_vh_gzvm_vcpu_exit_reason(vcpu, &need_userspace);
}
if (copy_to_user(argp, vcpu->run, sizeof(struct gzvm_vcpu_run)))

View File

@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/mediatek/gzvm_drv.h>
#include <trace/hooks/gzvm.h>
#include "gzvm_common.h"
static DEFINE_MUTEX(gzvm_list_lock);
@@ -374,6 +375,8 @@ static void gzvm_destroy_vm(struct gzvm *gzvm)
mutex_unlock(&gzvm->lock);
trace_android_vh_gzvm_destroy_vm_post_process(gzvm);
/* No need to lock here because it's single-threaded execution */
gzvm_destroy_all_ppage(gzvm);

View File

@@ -187,7 +187,7 @@ static bool gunyah_has_qcom_extensions(void)
uuid_t uuid;
u32 *up;
arm_smccc_1_1_smc(GUNYAH_QCOM_EXT_CALL_UUID_ID, &res);
arm_smccc_1_1_invoke(GUNYAH_QCOM_EXT_CALL_UUID_ID, &res);
up = (u32 *)&uuid.b[0];
up[0] = lower_32_bits(res.a0);

View File

@@ -28,7 +28,7 @@ void erofs_put_metabuf(struct erofs_buf *buf)
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
enum erofs_kmap_type type)
{
pgoff_t index = offset >> PAGE_SHIFT;
pgoff_t index = (buf->off + offset) >> PAGE_SHIFT;
struct folio *folio = NULL;
if (buf->page) {
@@ -62,6 +62,7 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
struct erofs_sb_info *sbi = EROFS_SB(sb);
buf->file = NULL;
buf->off = sbi->dif0.fsoff;
if (erofs_is_fileio_mode(sbi)) {
buf->file = sbi->dif0.file; /* some fs like FUSE needs it */
buf->mapping = buf->file->f_mapping;
@@ -326,7 +327,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->private = buf.base;
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = mdev.m_pa;
iomap->addr = mdev.m_dif->fsoff + mdev.m_pa;
if (flags & IOMAP_DAX)
iomap->addr += mdev.m_dif->dax_part_off;
}

View File

@@ -47,6 +47,7 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
{
const struct cred *old_cred;
struct iov_iter iter;
int ret;
@@ -60,7 +61,9 @@ static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
rq->iocb.ki_flags = IOCB_DIRECT;
iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
rq->bio.bi_iter.bi_size);
old_cred = override_creds(rq->iocb.ki_filp->f_cred);
ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
revert_creds(old_cred);
if (ret != -EIOCBQUEUED)
erofs_fileio_ki_complete(&rq->iocb, ret);
}
@@ -147,7 +150,8 @@ io_retry:
if (err)
break;
io->rq = erofs_fileio_rq_alloc(&io->dev);
io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9;
io->rq->bio.bi_iter.bi_sector =
(io->dev.m_dif->fsoff + io->dev.m_pa) >> 9;
attached = 0;
}
if (!bio_add_folio(&io->rq->bio, folio, len, cur))

View File

@@ -51,7 +51,7 @@ struct erofs_device_info {
struct erofs_fscache *fscache;
struct file *file;
struct dax_device *dax_dev;
u64 dax_part_off;
u64 fsoff, dax_part_off;
u32 blocks;
u32 mapped_blkaddr;
@@ -213,6 +213,7 @@ enum erofs_kmap_type {
struct erofs_buf {
struct address_space *mapping;
struct file *file;
u64 off;
struct page *page;
void *base;
enum erofs_kmap_type kmap_type;

View File

@@ -383,7 +383,7 @@ static void erofs_default_options(struct erofs_sb_info *sbi)
enum {
Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum,
Opt_device, Opt_fsid, Opt_domain_id, Opt_directio,
Opt_device, Opt_fsid, Opt_domain_id, Opt_directio, Opt_fsoffset,
Opt_err
};
@@ -411,6 +411,7 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
fsparam_string("fsid", Opt_fsid),
fsparam_string("domain_id", Opt_domain_id),
fsparam_flag_no("directio", Opt_directio),
fsparam_u64("fsoffset", Opt_fsoffset),
{}
};
@@ -534,6 +535,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
#endif
break;
case Opt_fsoffset:
sbi->dif0.fsoff = result.uint_64;
break;
default:
return -ENOPARAM;
}
@@ -676,6 +680,14 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
}
}
if (sbi->dif0.fsoff) {
if (sbi->dif0.fsoff & (sb->s_blocksize - 1))
return invalfc(fc, "fsoffset %llu is not aligned to block size %lu",
sbi->dif0.fsoff, sb->s_blocksize);
if (erofs_is_fscache_mode(sb))
return invalfc(fc, "cannot use fsoffset in fscache mode");
}
if (test_opt(&sbi->opt, DAX_ALWAYS)) {
if (!sbi->dif0.dax_dev) {
errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
@@ -1005,6 +1017,8 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
if (sbi->domain_id)
seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
if (sbi->dif0.fsoff)
seq_printf(seq, ",fsoffset=%llu", sbi->dif0.fsoff);
return 0;
}

View File

@@ -1773,7 +1773,8 @@ drain_io:
bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS,
REQ_OP_READ, GFP_NOIO);
bio->bi_end_io = z_erofs_endio;
bio->bi_iter.bi_sector = cur >> 9;
bio->bi_iter.bi_sector =
(mdev.m_dif->fsoff + cur) >> 9;
bio->bi_private = q[JQ_SUBMIT];
if (readahead)
bio->bi_opf |= REQ_RAHEAD;

View File

@@ -22,6 +22,8 @@
#include <trace/events/f2fs.h>
#include <trace/hooks/fs.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_write_checkpoint);
#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 3))
static struct kmem_cache *ino_entry_slab;

View File

@@ -1715,6 +1715,10 @@ struct f2fs_sb_info {
/* threshold for gc trials on pinned files */
unsigned short gc_pin_file_threshold;
/* free sections reserved for pinned file */
ANDROID_KABI_IGNORE(0, unsigned int reserved_pin_section);
struct f2fs_rwsem pin_sem;
/* maximum # of trials to find a victim segment for SSR and GC */

View File

@@ -38,6 +38,9 @@
#undef CREATE_TRACE_POINTS
#include <trace/hooks/fs.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_sync_file_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_sync_file_exit);
static void f2fs_zero_post_eof_page(struct inode *inode, loff_t new_size)
{
loff_t old_size = i_size_read(inode);
@@ -1892,9 +1895,8 @@ next_alloc:
}
}
if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ?
ZONED_PIN_SEC_REQUIRED_COUNT :
GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
if (has_not_enough_free_secs(sbi, 0,
sbi->reserved_pin_section)) {
f2fs_down_write(&sbi->gc_lock);
stat_inc_gc_call_count(sbi, FOREGROUND);
err = f2fs_gc(sbi, &gc_control);

View File

@@ -23,6 +23,9 @@
#include "iostat.h"
#include <trace/events/f2fs.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_gc_begin);
EXPORT_TRACEPOINT_SYMBOL_GPL(f2fs_gc_end);
static struct kmem_cache *victim_entry_slab;
static unsigned int count_bits(const unsigned long *addr,

View File

@@ -4780,6 +4780,10 @@ try_onemore:
/* get segno of first zoned block device */
sbi->first_zoned_segno = get_first_seq_zone_segno(sbi);
sbi->reserved_pin_section = f2fs_sb_has_blkzoned(sbi) ?
ZONED_PIN_SEC_REQUIRED_COUNT :
GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi));
/* Read accumulated write IO statistics if exists */
seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
if (__exist_node_summaries(sbi))

View File

@@ -824,6 +824,13 @@ out:
return count;
}
if (!strcmp(a->attr.name, "reserved_pin_section")) {
if (t > GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))
return -EINVAL;
*ui = (unsigned int)t;
return count;
}
*ui = (unsigned int)t;
return count;
@@ -1130,6 +1137,7 @@ F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
#endif
F2FS_SBI_GENERAL_RW_ATTR(carve_out);
F2FS_SBI_GENERAL_RW_ATTR(reserved_pin_section);
/* STAT_INFO ATTR */
#ifdef CONFIG_F2FS_STAT_FS
@@ -1323,6 +1331,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(last_age_weight),
ATTR_LIST(max_read_extent_count),
ATTR_LIST(carve_out),
ATTR_LIST(reserved_pin_section),
NULL,
};
ATTRIBUTE_GROUPS(f2fs);

View File

@@ -244,28 +244,32 @@ static unsigned int fuse_req_hash(u64 unique)
/*
* A new request is available, wake fiq->waitq
*/
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq, bool sync)
__releases(fiq->lock)
{
wake_up(&fiq->waitq);
if (sync)
wake_up_sync(&fiq->waitq);
else
wake_up(&fiq->waitq);
kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
spin_unlock(&fiq->lock);
}
static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget)
static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget,
bool sync)
{
spin_lock(&fiq->lock);
if (fiq->connected) {
fiq->forget_list_tail->next = forget;
fiq->forget_list_tail = forget;
fuse_dev_wake_and_unlock(fiq);
fuse_dev_wake_and_unlock(fiq, sync);
} else {
kfree(forget);
spin_unlock(&fiq->lock);
}
}
static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req, bool sync)
{
spin_lock(&fiq->lock);
if (list_empty(&req->intr_entry)) {
@@ -279,21 +283,21 @@ static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *r
list_del_init(&req->intr_entry);
spin_unlock(&fiq->lock);
} else {
fuse_dev_wake_and_unlock(fiq);
fuse_dev_wake_and_unlock(fiq, sync);
}
} else {
spin_unlock(&fiq->lock);
}
}
static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req)
static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req, bool sync)
{
spin_lock(&fiq->lock);
if (fiq->connected) {
if (req->in.h.opcode != FUSE_NOTIFY_REPLY)
req->in.h.unique = fuse_get_unique_locked(fiq);
list_add_tail(&req->list, &fiq->pending);
fuse_dev_wake_and_unlock(fiq);
fuse_dev_wake_and_unlock(fiq, sync);
} else {
spin_unlock(&fiq->lock);
req->out.h.error = -ENOTCONN;
@@ -309,13 +313,13 @@ const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req)
static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req, bool sync)
{
req->in.h.len = sizeof(struct fuse_in_header) +
fuse_len_args(req->args->in_numargs,
(struct fuse_arg *) req->args->in_args);
trace_fuse_request_send(req);
fiq->ops->send_req(fiq, req);
fiq->ops->send_req(fiq, req, sync);
}
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
@@ -331,7 +335,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
forget->forget_one.nodeid = nodeid;
forget->forget_one.nlookup = nlookup;
fiq->ops->send_forget(fiq, forget);
fiq->ops->send_forget(fiq, forget, false);
}
static void flush_bg_queue(struct fuse_conn *fc)
@@ -345,7 +349,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
fuse_send_one(fiq, req);
fuse_send_one(fiq, req, false);
}
}
@@ -421,7 +425,7 @@ static int queue_interrupt(struct fuse_req *req)
if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags)))
return -EINVAL;
fiq->ops->send_interrupt(fiq, req);
fiq->ops->send_interrupt(fiq, req, false);
return 0;
}
@@ -482,7 +486,7 @@ static void __fuse_request_send(struct fuse_req *req)
/* acquire extra reference, since request is still needed after
fuse_request_end() */
__fuse_get_request(req);
fuse_send_one(fiq, req);
fuse_send_one(fiq, req, true);
request_wait_answer(req);
/* Pairs with smp_wmb() in fuse_request_end() */
@@ -660,7 +664,7 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm,
fuse_args_to_req(req, args);
fuse_send_one(fiq, req);
fuse_send_one(fiq, req, false);
return 0;
}
@@ -1888,7 +1892,7 @@ static void fuse_resend(struct fuse_conn *fc)
}
/* iq and pq requests are both oldest to newest */
list_splice(&to_queue, &fiq->pending);
fuse_dev_wake_and_unlock(fiq);
fuse_dev_wake_and_unlock(fiq, false);
}
static int fuse_notify_resend(struct fuse_conn *fc)

View File

@@ -528,17 +528,17 @@ struct fuse_iqueue_ops {
/**
* Send one forget
*/
void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link);
void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link, bool sync);
/**
* Send interrupt for request
*/
void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req);
void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req, bool sync);
/**
* Send one request
*/
void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req);
void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req, bool sync);
/**
* Clean up when fuse_iqueue is destroyed

View File

@@ -1229,7 +1229,7 @@ static struct virtio_driver virtio_fs_driver = {
#endif
};
static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link)
static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link, bool sync)
{
struct virtio_fs_forget *forget;
struct virtio_fs_forget_req *req;
@@ -1255,7 +1255,7 @@ static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_li
kfree(link);
}
static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req, bool sync)
{
/*
* TODO interrupts.
@@ -1468,7 +1468,7 @@ out:
return ret;
}
static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req, bool sync)
{
unsigned int queue_id;
struct virtio_fs *fs;

File diff suppressed because it is too large Load Diff

View File

@@ -7,3 +7,6 @@ type 'enum prs_errcode' changed
type 'struct sched_dl_entity' changed
member 'unsigned int dl_server_idle:1' was added
type 'struct f2fs_sb_info' changed
member 'union { unsigned int reserved_pin_section; unsigned char __kabi_ignored0; }' was added

View File

@@ -195,10 +195,12 @@
consume_skb
contig_page_data
__contpte_try_unfold
_copy_from_iter
copy_from_kernel_nofault
__copy_overflow
copy_page
copy_page_from_iter_atomic
_copy_to_iter
cpu_all_bits
cpu_bit_bitmap
cpufreq_boost_enabled
@@ -234,10 +236,13 @@
crypto_aead_setauthsize
crypto_aead_setkey
crypto_ahash_digest
crypto_ahash_final
crypto_ahash_finup
crypto_ahash_setkey
crypto_alloc_aead
crypto_alloc_ahash
crypto_alloc_base
crypto_alloc_rng
crypto_alloc_shash
crypto_alloc_skcipher
crypto_cipher_encrypt_one
@@ -247,13 +252,17 @@
crypto_dequeue_request
crypto_destroy_tfm
crypto_enqueue_request
crypto_get_default_null_skcipher
crypto_has_alg
crypto_init_queue
__crypto_memneq
crypto_put_default_null_skcipher
crypto_register_ahash
crypto_register_alg
crypto_register_shash
crypto_register_skcipher
crypto_req_done
crypto_rng_reset
crypto_sha1_finup
crypto_sha1_update
crypto_shash_digest
@@ -636,6 +645,7 @@
drm_atomic_set_mode_prop_for_crtc
drm_atomic_state_alloc
drm_atomic_state_clear
drm_atomic_state_default_release
__drm_atomic_state_free
drm_compat_ioctl
drm_connector_attach_content_type_property
@@ -1299,6 +1309,7 @@
__local_bh_enable_ip
__lock_buffer
lockref_get
lock_sock_nested
logfc
log_post_read_mmio
log_post_write_mmio
@@ -1799,6 +1810,8 @@
proc_mkdir
proc_mkdir_data
proc_remove
proto_register
proto_unregister
__pskb_copy_fclone
pskb_expand_head
__pskb_pull_tail
@@ -1920,6 +1933,7 @@
release_firmware
__release_region
release_resource
release_sock
remap_pfn_range
remap_vmalloc_range
remove_cpu
@@ -2015,6 +2029,8 @@
sdio_writel
sdio_writesb
sdio_writew
security_sk_clone
security_sock_graft
send_sig
seq_list_next
seq_list_start
@@ -2077,6 +2093,7 @@
single_release
si_swapinfo
sized_strscpy
sk_alloc
skb_add_rx_frag_netmem
skb_checksum_help
skb_clone
@@ -2104,6 +2121,7 @@
skb_trim
skb_tstamp_tx
skb_unlink
sk_free
skip_spaces
sk_skb_reason_drop
smpboot_register_percpu_thread
@@ -2193,10 +2211,25 @@
snd_timer_stop
snprintf
__sock_create
sock_init_data
sock_kfree_s
sock_kmalloc
sock_kzfree_s
sock_no_accept
sock_no_bind
sock_no_connect
sock_no_getname
sock_no_ioctl
sock_no_listen
sock_no_mmap
sock_no_recvmsg
sock_no_sendmsg
sock_no_shutdown
sock_no_socketpair
sock_register
sock_release
sock_unregister
sock_wake_async
sock_wfree
sort
spi_add_device
@@ -2732,10 +2765,12 @@
wakeup_source_register
wakeup_source_unregister
__wake_up_sync
__wake_up_sync_key
__warn_flushing_systemwide_wq
__warn_printk
wireless_nlevent_flush
wireless_send_event
woken_wake_function
work_busy
write_inode_now
__write_overflow_field

85
gki/aarch64/symbols/asr Normal file
View File

@@ -0,0 +1,85 @@
[abi_symbol_list]
# required by asr5803.ko
sdhci_enable_sdio_irq
# required by asr_serial.ko
uart_get_divisor
uart_handle_cts_change
uart_handle_dcd_change
uart_insert_char
# required by ehci-asr-ci.ko
ehci_init_driver
ehci_setup
# required by phy-asr-ci-usb2.ko
usb_add_phy_dev
usb_remove_phy
# required by pvrsrvkm.ko
call_rcu
devm_devfreq_remove_device
dev_pm_opp_remove
dma_fence_array_ops
dma_fence_enable_sw_signaling
idr_replace
kthread_freezable_should_stop
rcu_barrier
# required by sdhci_asr.ko
sdhci_resume_host
sdhci_send_tuning
sdhci_set_clock
sdhci_set_uhs_signaling
sdhci_suspend_host
sdhci_reset_tuning
# required by vh_sched.ko
__traceiter_android_vh_map_util_freq
__tracepoint_android_vh_map_util_freq
# required by asr_drm.ko
clk_set_rate_exclusive
clk_rate_exclusive_put
# required by mercury.ko
media_device_register_entity
media_device_unregister_entity
v4l2_ctrl_get_menu
v4l2_ctrl_type_op_equal
v4l2_ctrl_type_op_init
v4l2_ctrl_type_op_log
v4l2_m2m_buf_done_and_job_finish
v4l2_m2m_last_buf
v4l2_type_names
devm_devfreq_register_opp_notifier
# required by jpu_heap.ko
kmem_cache_size
memset16
# required by dwc3.ko
extcon_find_edev_by_node
phy_pm_runtime_put_sync
usb_get_maximum_ssp_rate
# required by xhci-asr.ko
extcon_find_edev_by_node
# required by clk-asr.ko
clk_register_mux_table
# required by keypad-asr.ko
input_device_enabled
fwnode_create_software_node
# required by asr_mipi_panel.ko
devm_of_find_backlight
drm_of_component_probe
mipi_dsi_set_maximum_return_packet_size
# required by usb_offload.ko
usb_set_interface
usb_control_msg
snd_usb_find_format

View File

@@ -8,6 +8,8 @@
__alloc_skb
alloc_workqueue
alt_cb_patch_nops
__amba_driver_register
amba_driver_unregister
android_rvh_probe_register
__arch_copy_from_user
__arch_copy_to_user
@@ -45,6 +47,8 @@
bpf_trace_run8
bpf_trace_run9
bus_get_dev_root
bus_register
bus_unregister
call_rcu
cancel_delayed_work
cancel_delayed_work_sync
@@ -58,6 +62,7 @@
__check_object_size
class_create
class_destroy
class_find_device
class_register
class_unregister
clk_disable
@@ -77,6 +82,9 @@
complete_all
completion_done
config_ep_by_speed
configfs_register_subsystem
configfs_unregister_subsystem
config_group_init
config_group_init_type_name
console_suspend_enabled
console_unlock
@@ -93,19 +101,24 @@
cpufreq_quick_get
cpufreq_register_notifier
cpufreq_unregister_notifier
__cpuhp_remove_state
__cpuhp_setup_state
__cpuhp_setup_state_cpuslocked
__cpuhp_state_add_instance
__cpuhp_state_remove_instance
__cpu_online_mask
cpu_pm_register_notifier
__cpu_possible_mask
cpu_scale
cpus_read_lock
cpus_read_unlock
cpu_subsys
crc32_le
crypto_alloc_shash
crypto_destroy_tfm
csum_partial
_ctype
debugfs_create_blob
debugfs_create_bool
debugfs_create_dir
debugfs_create_file
debugfs_create_symlink
@@ -126,11 +139,15 @@
devfreq_get_devfreq_by_phandle
devfreq_recommended_opp
devfreq_remove_device
dev_get_by_name
dev_get_stats
device_add
device_create
device_create_file
device_destroy
device_for_each_child
device_initialize
device_property_present
device_register
device_remove_file
device_set_wakeup_capable
@@ -160,6 +177,7 @@
devm_kfree
devm_kmalloc
devm_kmemdup
devm_krealloc
devm_kstrdup
devm_mfd_add_devices
__devm_of_phy_provider_register
@@ -176,6 +194,7 @@
__devm_request_region
devm_request_threaded_irq
devm_snd_soc_register_component
devm_thermal_of_zone_register
_dev_notice
dev_pm_opp_add_dynamic
dev_pm_opp_find_freq_ceil
@@ -187,6 +206,8 @@
dev_pm_qos_remove_request
dev_pm_qos_update_request
__dev_queue_xmit
devres_add
__devres_alloc_node
dev_set_name
_dev_warn
disable_irq
@@ -200,11 +221,9 @@
dma_buf_end_cpu_access_partial
dma_buf_get
dma_buf_get_flags
dma_buf_map_attachment
dma_buf_map_attachment_unlocked
dma_buf_mmap
dma_buf_put
dma_buf_unmap_attachment
dma_buf_unmap_attachment_unlocked
dma_buf_vmap_unlocked
dma_buf_vunmap_unlocked
@@ -238,6 +257,8 @@
dma_unmap_page_attrs
dma_unmap_sg_attrs
do_wait_intr
down_read
down_write
d_path
drain_workqueue
driver_unregister
@@ -293,6 +314,7 @@
dump_stack
enable_irq
eth_type_trans
fdget
fd_install
_find_first_bit
_find_first_zero_bit
@@ -305,6 +327,7 @@
flush_delayed_work
flush_work
__flush_workqueue
__folio_put
__fortify_panic
fput
free_irq
@@ -313,7 +336,9 @@
free_pages
free_percpu
free_reserved_page
freq_qos_add_request
freq_qos_update_request
fwnode_property_present
gcd
generic_file_llseek
generic_handle_irq
@@ -333,6 +358,7 @@
get_free_pages_noprof
get_pid_task
get_random_bytes
__get_task_comm
get_task_mm
get_unused_fd_flags
gpiochip_add_pin_range
@@ -355,6 +381,7 @@
hrtimer_active
hrtimer_cancel
hrtimer_forward
__hrtimer_get_remaining
hrtimer_init
hrtimer_start_range_ns
hrtimer_try_to_cancel
@@ -374,12 +401,16 @@
ida_alloc_range
ida_free
idr_alloc
idr_destroy
idr_find
idr_for_each
idr_remove
in4_pton
in6_pton
init_dummy_netdev
init_net
__init_rwsem
init_srcu_struct
__init_swait_queue_head
init_task
init_timer_key
@@ -471,6 +502,8 @@
ktime_get_real_ts64
ktime_get_ts64
ktime_get_with_offset
kunit_binary_assert_format
__kunit_do_failed_assertion
kvfree
kvfree_call_rcu
__kvmalloc_node_noprof
@@ -496,9 +529,13 @@
mfd_remove_devices
misc_deregister
misc_register
__mmap_lock_do_trace_acquire_returned
__mmap_lock_do_trace_released
__mmap_lock_do_trace_start_locking
mod_delayed_work_on
mod_timer
module_layout
module_put
__msecs_to_jiffies
msleep
msleep_interruptible
@@ -525,6 +562,7 @@
nla_put
nla_put_64bit
nla_reserve
nonseekable_open
noop_llseek
nr_cpu_ids
nsecs_to_jiffies
@@ -536,6 +574,7 @@
of_alias_get_id
of_clk_get_by_name
of_count_phandle_with_args
of_cpu_node_to_id
of_device_get_match_data
of_device_is_available
of_device_is_compatible
@@ -546,6 +585,7 @@
of_find_node_by_type
of_find_node_opts_by_path
of_find_property
of_fwnode_ops
of_get_child_by_name
of_get_named_gpio
of_get_next_available_child
@@ -557,6 +597,7 @@
of_match_device
of_match_node
of_n_addr_cells
of_node_name_eq
of_n_size_cells
__of_parse_phandle_with_args
of_phandle_iterator_init
@@ -576,6 +617,8 @@
of_reserved_mem_device_release
of_reserved_mem_lookup
of_root
page_pinner_inited
__page_pinner_put_page
panic
panic_notifier_list
param_array_ops
@@ -612,6 +655,7 @@
perf_event_enable
perf_event_read_local
perf_event_release_kernel
perf_pmu_unregister
perf_trace_buf_alloc
perf_trace_run_bpf_submit
pfn_is_map_memory
@@ -622,6 +666,7 @@
pinctrl_lookup_state
pinctrl_select_state
pinctrl_utils_free_map
pin_user_pages
platform_device_register_full
platform_device_unregister
__platform_driver_probe
@@ -696,6 +741,7 @@
_raw_write_unlock
_raw_write_unlock_irqrestore
rb_erase
rb_first
rb_insert_color
rb_next
rcu_barrier
@@ -710,7 +756,6 @@
__register_chrdev
register_chrdev_region
register_die_notifier
register_kretprobe
register_netdev
register_netdevice
register_pm_notifier
@@ -729,7 +774,6 @@
regulator_put
regulator_set_mode
release_firmware
__release_region
remap_pfn_range
remove_proc_entry
request_firmware
@@ -757,6 +801,7 @@
seq_read
seq_write
set_cpus_allowed_ptr
set_normalized_timespec64
sg_alloc_table
sg_free_table
sg_init_table
@@ -827,6 +872,8 @@
sprintf
srcu_notifier_call_chain
srcu_notifier_chain_register
__srcu_read_lock
__srcu_read_unlock
sscanf
__stack_chk_fail
static_key_slow_dec
@@ -854,6 +901,7 @@
sync_file_create
synchronize_net
synchronize_rcu
synchronize_srcu
syscon_regmap_lookup_by_phandle
sysfs_add_file_to_group
sysfs_create_bin_file
@@ -863,6 +911,7 @@
sysfs_create_groups
sysfs_create_link
sysfs_emit
sysfs_emit_at
sysfs_remove_bin_file
sysfs_remove_file_ns
sysfs_remove_group
@@ -877,7 +926,9 @@
tasklet_init
tasklet_kill
__tasklet_schedule
thermal_zone_device_priv
thermal_zone_device_type
thermal_zone_device_update
thermal_zone_get_temp
thermal_zone_get_zone_by_name
time64_to_tm
@@ -897,9 +948,14 @@
__traceiter_android_vh_cpuidle_psci_exit
__traceiter_android_vh_free_task
__traceiter_android_vh_is_fpsimd_save
__traceiter_android_vh_meminfo_proc_show
__traceiter_android_vh_show_mem
__traceiter_clock_set_rate
__traceiter_device_pm_callback_end
__traceiter_device_pm_callback_start
__traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking
__traceiter_suspend_resume
__tracepoint_android_rvh_sched_fork_init
__tracepoint_android_vh_cpu_idle_enter
@@ -908,27 +964,34 @@
__tracepoint_android_vh_cpuidle_psci_exit
__tracepoint_android_vh_free_task
__tracepoint_android_vh_is_fpsimd_save
__tracepoint_android_vh_meminfo_proc_show
__tracepoint_android_vh_show_mem
__tracepoint_clock_set_rate
__tracepoint_device_pm_callback_end
__tracepoint_device_pm_callback_start
__tracepoint_mmap_lock_acquire_returned
__tracepoint_mmap_lock_released
__tracepoint_mmap_lock_start_locking
tracepoint_probe_register
tracepoint_probe_unregister
__tracepoint_suspend_resume
trace_print_array_seq
trace_raw_output_prep
__trace_trigger_soft_disabled
try_module_get
ttm_bo_mmap_obj
__udelay
unpin_user_page
unpin_user_pages
__unregister_chrdev
unregister_chrdev_region
unregister_kretprobe
unregister_netdev
unregister_netdevice_queue
unregister_pm_notifier
unregister_reboot_notifier
up
up_read
up_write
usb_enable_autosuspend
usb_ep_alloc_request
usb_ep_autoconfig
@@ -997,6 +1060,7 @@
vmalloc_noprof
vmalloc_to_pfn
vmap
__vma_start_write
vscnprintf
vsnprintf
vunmap
@@ -1011,10 +1075,7 @@
wakeup_source_register
wakeup_source_unregister
__warn_printk
# required by audio_exynos_test.ko
kunit_binary_assert_format
__kunit_do_failed_assertion
work_busy
# required by cfg80211.ko
debugfs_rename
@@ -1022,7 +1083,6 @@
dev_close
__dev_get_by_index
dev_get_by_index
device_add
device_del
device_rename
do_trace_netlink_extack
@@ -1077,6 +1137,41 @@
# required by cmupmucal.ko
single_open_size
# required by coresight.ko
amba_bustype
bus_find_device
bus_for_each_dev
configfs_register_group
configfs_unregister_group
config_item_set_name
__cpu_present_mask
__dev_fwnode
device_match_fwnode
devm_bitmap_zalloc
dma_alloc_pages
dma_free_pages
fwnode_handle_get
fwnode_property_read_u32_array
hashlen_string
idr_alloc_u32
__kmalloc_cache_node_noprof
of_get_next_parent
of_graph_get_next_endpoint
of_graph_get_port_parent
of_graph_get_remote_endpoint
of_graph_is_present
of_graph_parse_endpoint
perf_aux_output_begin
perf_aux_output_end
perf_aux_output_flag
perf_get_aux
perf_pmu_register
perf_report_aux_output_id
platform_bus_type
sysfs_add_link_to_group
sysfs_remove_file_from_group
sysfs_remove_link_from_group
# required by cpif.ko
csum_ipv6_magic
csum_tcpudp_nofold
@@ -1087,7 +1182,6 @@
pci_restore_msi_state
# required by cpif_dinet.ko
dev_get_by_name
get_random_u16
in_aton
in_dev_finish_destroy
@@ -1096,9 +1190,6 @@
neigh_for_each
nf_ct_iterate_cleanup_net
# required by cpif_page.ko
__page_frag_cache_drain
# required by drm_display_helper.ko
drm_atomic_get_new_private_obj_state
drm_atomic_get_old_private_obj_state
@@ -1178,15 +1269,15 @@
# required by dwc3-exynos-usb.ko
device_create_managed_software_node
device_property_present
device_set_wakeup_enable
phy_set_mode_ext
platform_device_add
platform_device_add_resources
platform_device_alloc
platform_device_del
platform_device_put
pm_runtime_allow
register_kretprobe
unregister_kretprobe
usb_gadget_set_state
usb_hcd_is_primary_hcd
usb_otg_state_string
@@ -1200,6 +1291,7 @@
arch_freq_scale
balance_push_callback
bpf_trace_run12
capacity_freq_ref
__cpu_dying_mask
cpufreq_add_update_util_hook
cpufreq_cpu_get_raw
@@ -1210,11 +1302,11 @@
cpufreq_register_governor
cpufreq_remove_update_util_hook
cpufreq_this_cpu_can_update
__cpuhp_remove_state
cpuidle_governor_latency_req
cpuidle_register_governor
cpumask_any_and_distribute
cpupri_find_fitness
cpu_scale
cpu_topology
deactivate_task
double_rq_lock
@@ -1222,7 +1314,6 @@
housekeeping_any_cpu
housekeeping_overridden
housekeeping_test_cpu
__hrtimer_get_remaining
irq_work_queue
irq_work_sync
kthread_create_worker_on_cpu
@@ -1244,20 +1335,23 @@
set_task_cpu
static_key_disable
stop_machine
stop_one_cpu
stop_one_cpu_nowait
sysctl_sched_features
system_32bit_el0_cpumask
tasklist_lock
task_rq_lock
tick_nohz_get_sleep_length
__traceiter_android_rvh_after_enqueue_task
__traceiter_android_rvh_after_dequeue_task
__traceiter_android_rvh_attach_entity_load_avg
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_check_preempt_wakeup_fair
__traceiter_android_rvh_cpu_capacity_show
__traceiter_android_rvh_cpu_cgroup_attach
__traceiter_android_rvh_dequeue_task
__traceiter_android_rvh_detach_entity_load_avg
__traceiter_android_rvh_do_sched_yield
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_find_busiest_queue
__traceiter_android_rvh_find_lowest_rq
__traceiter_android_rvh_find_new_ilb
@@ -1277,10 +1371,12 @@
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_set_cpus_allowed_by_task
__traceiter_android_rvh_set_task_cpu
__traceiter_android_rvh_show_max_freq
__traceiter_android_rvh_tick_entry
__traceiter_android_rvh_try_to_wake_up
__traceiter_android_rvh_uclamp_eff_get
__traceiter_android_rvh_update_blocked_fair
__traceiter_android_rvh_update_cpu_capacity
__traceiter_android_rvh_update_load_avg
__traceiter_android_rvh_update_misfit_status
__traceiter_android_rvh_util_est_update
@@ -1291,14 +1387,17 @@
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_syscall_prctl_finished
__traceiter_binder_transaction_received
__tracepoint_android_rvh_after_enqueue_task
__traceiter_cpu_frequency_limits
__tracepoint_android_rvh_after_dequeue_task
__tracepoint_android_rvh_attach_entity_load_avg
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_check_preempt_wakeup_fair
__tracepoint_android_rvh_cpu_capacity_show
__tracepoint_android_rvh_cpu_cgroup_attach
__tracepoint_android_rvh_dequeue_task
__tracepoint_android_rvh_detach_entity_load_avg
__tracepoint_android_rvh_do_sched_yield
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_find_busiest_queue
__tracepoint_android_rvh_find_lowest_rq
__tracepoint_android_rvh_find_new_ilb
@@ -1318,10 +1417,12 @@
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_set_cpus_allowed_by_task
__tracepoint_android_rvh_set_task_cpu
__tracepoint_android_rvh_show_max_freq
__tracepoint_android_rvh_tick_entry
__tracepoint_android_rvh_try_to_wake_up
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_update_blocked_fair
__tracepoint_android_rvh_update_cpu_capacity
__tracepoint_android_rvh_update_load_avg
__tracepoint_android_rvh_update_misfit_status
__tracepoint_android_rvh_util_est_update
@@ -1332,6 +1433,7 @@
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_syscall_prctl_finished
__tracepoint_binder_transaction_received
__tracepoint_cpu_frequency_limits
uclamp_eff_value
update_rq_clock
wake_up_if_idle
@@ -1351,6 +1453,10 @@
# required by exynos-cpu-phyid.ko
of_get_next_cpu_node
# required by exynos-cpu-profiler.ko
__traceiter_android_rvh_cpufreq_transition
__tracepoint_android_rvh_cpufreq_transition
# required by exynos-cpufreq.ko
cpufreq_freq_transition_begin
cpufreq_freq_transition_end
@@ -1380,9 +1486,30 @@
cpu_hotplug_disable
cpu_hotplug_enable
# required by exynos-ecu.ko
__of_get_address
# required by exynos-hsi-iommu.ko
gen_pool_has_addr
# required by exynos-hvm.ko
add_wait_queue_priority
alloc_pages_exact_noprof
anon_inode_getfd
disable_percpu_irq
enable_percpu_irq
eventfd_ctx_do_read
eventfd_ctx_fdget
eventfd_ctx_fileget
eventfd_ctx_put
eventfd_ctx_remove_wait_queue
eventfd_signal_mask
free_pages_exact
mtree_load
of_irq_get
__request_percpu_irq
unpin_user_pages_dirty_lock
# required by exynos-irq-gic-v3-its.ko
its_restore_enable
its_save_disable
@@ -1397,22 +1524,38 @@
# required by exynos-msc-dsu.ko
bitmap_alloc
# required by exynos-ntc.ko
device_get_match_data
device_property_read_u32_array
devm_iio_channel_get
iio_convert_raw_to_processed
iio_get_channel_type
iio_read_channel_raw
# required by exynos-pd.ko
of_genpd_add_provider_simple
pm_genpd_add_subdomain
pm_genpd_init
# required by exynos-seclog.ko
debugfs_create_bool
# required by exynos-usb-audio-offloading.ko
snd_ctl_add
snd_ctl_new1
snd_usb_autoresume
snd_usb_autosuspend
snd_usb_register_platform_ops
__traceiter_android_rvh_usb_dev_suspend
__traceiter_android_vh_usb_dev_resume
__tracepoint_android_rvh_usb_dev_suspend
__tracepoint_android_vh_usb_dev_resume
usb_altnum_to_altsetting
usb_choose_configuration
usb_ifnum_to_if
xhci_get_endpoint_index
xhci_get_ep_ctx
xhci_sideband_add_endpoint
xhci_sideband_create_interrupter
xhci_sideband_register
xhci_sideband_unregister
# required by exynos_amb_control.ko
kthread_cancel_delayed_work_sync
@@ -1474,7 +1617,6 @@
__drm_atomic_helper_plane_duplicate_state
drm_atomic_helper_prepare_planes
drm_atomic_helper_set_config
drm_atomic_helper_setup_commit
drm_atomic_helper_swap_state
drm_atomic_helper_update_legacy_modeset_state
drm_atomic_helper_update_plane
@@ -1494,12 +1636,11 @@
drm_crtc_accurate_vblank_count
drm_crtc_enable_color_mgmt
drm_crtc_handle_vblank
drm_crtc_send_vblank_event
drm_crtc_vblank_count
drm_crtc_vblank_count_and_time
drm_crtc_vblank_get
drm_crtc_vblank_off
drm_crtc_vblank_on
drm_crtc_vblank_on_config
drm_crtc_vblank_put
drm_display_mode_to_videomode
drm_edid_to_sad
@@ -1546,15 +1687,13 @@
drm_property_create_enum
drm_property_create_range
drm_property_create_signed_range
drm_property_lookup_blob
drm_property_replace_blob
drm_property_replace_blob_from_id
__drm_puts_seq_file
drm_rect_clip_scaled
drm_rect_intersect
drm_rotation_simplify
drm_self_refresh_helper_alter_state
drm_send_event_locked
drm_wait_one_vblank
drm_writeback_cleanup_job
drm_writeback_connector_init
drm_writeback_queue_job
@@ -1575,6 +1714,7 @@
phy_init
platform_find_device_by_driver
seq_release
try_wait_for_completion
v4l2_match_dv_timings
# required by exynos_drmdpu_panel.ko
@@ -1647,7 +1787,6 @@
snd_soc_pm_ops
# required by exynos_thermal_v2.ko
devm_thermal_of_zone_register
dev_pm_opp_get_opp_count
kthread_flush_work
of_get_cpu_node
@@ -1657,8 +1796,6 @@
thermal_zone_device_disable
thermal_zone_device_enable
thermal_zone_device_id
thermal_zone_device_priv
thermal_zone_device_update
thermal_zone_for_each_trip
thermal_zone_set_trip_temp
@@ -1669,6 +1806,7 @@
do_SAK
handle_sysrq
__kfifo_out_linear
__release_region
sysrq_mask
tty_flip_buffer_push
__tty_insert_flip_string_flags
@@ -1693,7 +1831,6 @@
platform_device_register
# required by freq-qos-tracer.ko
freq_qos_add_request
freq_qos_remove_request
# required by gpu-sched.ko
@@ -1709,6 +1846,9 @@
# required by hardlockup-watchdog.ko
smp_call_on_cpu
# required by hdcp2.ko
crypto_shash_digest
# required by hook.ko
__traceiter_android_vh_do_wake_up_sync
__traceiter_android_vh_set_wake_flags
@@ -1719,6 +1859,13 @@
# required by hts.ko
kernel_cpustat
# required by hwmon.ko
device_find_child
device_property_read_string
devres_free
i2c_verify_client
sysfs_notify
# required by irq-gic-v3-vh.ko
__traceiter_android_rvh_gic_v3_set_affinity
__tracepoint_android_rvh_gic_v3_set_affinity
@@ -1743,18 +1890,14 @@
devm_rtc_device_register
# required by mcDrvModule.ko
crypto_alloc_shash
crypto_destroy_tfm
crypto_shash_final
crypto_shash_update
down_read
freezer_active
freezing_slow_path
get_zeroed_page_noprof
kstrtol_from_user
ktime_get_raw_ts64
mmput
pin_user_pages
__refrigerator
release_pages
sg_miter_next
@@ -1762,17 +1905,16 @@
sg_miter_stop
vmalloc_to_page
wait_for_completion_killable
wait_for_completion_state
# required by nanohub.ko
arch_timer_read_counter
class_find_device
clocks_calc_mult_shift
device_create_bin_file
iio_device_alloc
iio_device_free
__iio_device_register
iio_device_unregister
nonseekable_open
rtc_set_time
rtc_tm_to_time64
sched_setscheduler
@@ -1782,7 +1924,7 @@
cpuidle_pause_and_lock
cpuidle_resume_and_unlock
dev_pm_opp_find_freq_floor
__vma_start_write
of_property_read_variable_u64_array
vsprintf
# required by pablo-actuator-ak737x.ko
@@ -1825,7 +1967,6 @@
kvmemdup
kvrealloc_noprof
llist_add_batch
rb_first
tasklet_setup
# required by pcie-exynos-rc-core.ko
@@ -1847,13 +1988,10 @@
# required by pinctrl-samsung-ext.ko
device_get_next_child_node
fwnode_property_present
generic_handle_domain_irq
gpiochip_lock_as_irq
gpiochip_unlock_as_irq
irq_set_chained_handler_and_data
of_fwnode_ops
of_node_name_eq
pinctrl_add_gpio_range
pinctrl_force_default
pinctrl_force_sleep
@@ -1911,8 +2049,6 @@
iommu_group_set_name
# required by samsung-pdma.ko
__amba_driver_register
amba_driver_unregister
cpu_all_bits
__devm_reset_control_get
dma_async_device_register
@@ -1940,20 +2076,18 @@
dma_heap_get_drvdata
dma_heap_get_name
is_dma_buf_file
kstrtoul_from_user
mod_node_page_state
sched_set_normal
shrinker_alloc
shrinker_free
shrinker_register
__traceiter_android_vh_show_mem
__tracepoint_android_vh_show_mem
vm_insert_page
# required by samsung_iommu_v9.ko
device_link_add
device_link_del
dev_iommu_priv_set
devres_add
__devres_alloc_node
iommu_alloc_resv_region
iommu_device_register
iommu_device_sysfs_add
@@ -1966,7 +2100,16 @@
iommu_group_set_iommudata
iommu_set_fault_handler
of_find_node_with_property
rb_prev
report_iommu_fault
__traceiter_android_rvh_iommu_alloc_insert_iova
__traceiter_android_rvh_iommu_dma_info_to_prot
__traceiter_android_rvh_iommu_iovad_init_alloc_algo
__traceiter_android_rvh_iommu_limit_align_shift
__tracepoint_android_rvh_iommu_alloc_insert_iova
__tracepoint_android_rvh_iommu_dma_info_to_prot
__tracepoint_android_rvh_iommu_iovad_init_alloc_algo
__tracepoint_android_rvh_iommu_limit_align_shift
# required by scaler.ko
dma_fence_default_wait
@@ -1974,7 +2117,6 @@
sync_file_get_fence
v4l2_ctrl_handler_setup
v4l2_m2m_try_schedule
work_busy
# required by scsc_bt.ko
crc_ccitt
@@ -2022,7 +2164,6 @@
dql_reset
dst_release
ether_setup
for_each_kernel_tracepoint
ip_route_output_flow
ip_send_check
linkwatch_fire_event
@@ -2084,8 +2225,10 @@
devm_platform_ioremap_resource_byname
dma_addressing_limited
dma_buf_dynamic_attach
dma_buf_map_attachment
dma_buf_move_notify
dma_buf_pin
dma_buf_unmap_attachment
dma_buf_unpin
dma_fence_array_create
dma_fence_chain_ops
@@ -2101,7 +2244,6 @@
dma_resv_test_signaled
dma_resv_wait_timeout
down_read_killable
down_write
drm_debugfs_create_files
drm_dev_enter
drm_dev_exit
@@ -2128,15 +2270,11 @@
drm_syncobj_get_fd
drm_syncobj_get_handle
drm_syncobj_replace_fence
fdget
__folio_put
find_vma
get_random_u32
__get_task_comm
get_user_pages_fast
handle_simple_irq
ida_destroy
idr_destroy
idr_find
idr_for_each
idr_get_next
idr_replace
jiffies64_to_msecs
@@ -2144,8 +2282,6 @@
memremap
memunmap
mmu_notifier_synchronize
page_pinner_inited
__page_pinner_put_page
param_ops_hexint
pci_assign_unassigned_bus_resources
pci_bus_resource_n
@@ -2154,7 +2290,6 @@
pci_iounmap
pci_msix_vec_count
pci_resize_resource
perf_pmu_unregister
pid_task
pin_user_pages_fast
pm_runtime_get_if_active
@@ -2166,11 +2301,11 @@
rb_next_postorder
reclaim_shmem_address_space
request_firmware_into_buf
set_normalized_timespec64
set_page_dirty
sg_alloc_table_from_pages_segment
shmem_file_setup
shmem_read_mapping_page_gfp
__show_mem
si_meminfo
sysfs_remove_files
__traceiter_gpu_mem_total
@@ -2215,8 +2350,6 @@
ttm_resource_manager_usage
ttm_sg_tt_init
ttm_tt_fini
unpin_user_pages
up_write
vm_get_page_prot
__wake_up_locked
ww_mutex_lock
@@ -2253,7 +2386,6 @@
device_show_int
device_store_bool
device_store_int
devm_krealloc
dma_buf_fd
kstrtobool_from_user
kstrtoull_from_user
@@ -2320,18 +2452,37 @@
spi_unregister_controller
# required by ssld.ko
bus_register
bus_unregister
driver_register
# required by stm.ko
cleanup_srcu_struct
compat_ptr_ioctl
config_item_get
config_item_put
device_match_name
kobject_set_name
__memcat_p
register_ftrace_export
__request_module
unregister_ftrace_export
vm_iomap_memory
# required by sub_pmic_pinctrl.ko
irq_domain_create_simple
# required by tsmux.ko
fget
# required by ufs-exynos-core.ko
blk_crypto_reprogram_all_keys
blk_mq_unique_tag
devm_blk_crypto_profile_init
of_cpu_node_to_id
msi_domain_first_desc
msi_lock_descs
msi_next_desc
msi_unlock_descs
platform_device_msi_free_irqs_all
platform_device_msi_init_and_alloc_irqs
__traceiter_android_vh_ufs_check_int_errors
__traceiter_android_vh_ufs_compl_command
__traceiter_android_vh_ufs_fill_prdt
@@ -2347,6 +2498,7 @@
ufshcd_dme_set_attr
ufshcd_hold
ufshcd_mcq_config_mac
ufshcd_mcq_enable_esi
ufshcd_mcq_make_queues_operational
ufshcd_mcq_poll_cqe_lock
ufshcd_mcq_write_cqis
@@ -2369,7 +2521,6 @@
# required by usb_f_rndis_mp.ko
alloc_etherdev_mqs
dev_get_stats
dev_valid_name
eth_mac_addr
ethtool_op_get_link
@@ -2392,8 +2543,6 @@
bio_start_io_acct
__blk_alloc_disk
copy_page
__cpuhp_state_add_instance
__cpuhp_state_remove_instance
del_gendisk
device_add_disk
flush_dcache_page
@@ -2431,3 +2580,16 @@
# required by xperf.ko
cpufreq_quick_get_max
stpcpy
# preserved by --additions-only
drm_atomic_helper_setup_commit
drm_crtc_send_vblank_event
drm_crtc_vblank_on
drm_property_lookup_blob
drm_property_replace_blob
drm_wait_one_vblank
for_each_kernel_tracepoint
__page_frag_cache_drain
phy_set_mode_ext
__traceiter_android_rvh_after_enqueue_task
__tracepoint_android_rvh_after_enqueue_task

View File

@@ -67,6 +67,8 @@
__traceiter_android_vh_show_smap
__traceiter_android_vh_smaps_pte_entry
__traceiter_android_vh_split_large_folio_bypass
__traceiter_android_vh_try_to_freeze_todo
__traceiter_android_vh_try_to_freeze_todo_unfrozen
__traceiter_android_vh_tune_scan_control
__traceiter_android_vh_use_vm_swappiness
__traceiter_android_vh_warn_alloc_tune_ratelimit
@@ -112,6 +114,8 @@
__tracepoint_android_vh_smaps_pte_entry
__tracepoint_android_vh_split_large_folio_bypass
__tracepoint_android_vh_tune_scan_control
__tracepoint_android_vh_try_to_freeze_todo
__tracepoint_android_vh_try_to_freeze_todo_unfrozen
__tracepoint_android_vh_use_vm_swappiness
__tracepoint_android_vh_warn_alloc_tune_ratelimit
__tracepoint_android_vh_warn_alloc_show_mem_bypass

View File

@@ -38,6 +38,7 @@
folio_referenced
reclaim_pages
percpu_ref_is_zero
vm_normal_folio_pmd
__mod_lruvec_state
__mod_zone_page_state
__pte_offset_map_lock

View File

@@ -1659,6 +1659,7 @@
mempool_free
mempool_free_slab
mempool_init_noprof
mempool_resize
memremap
memscan
mem_section
@@ -3035,6 +3036,10 @@
__traceiter_android_vh_freq_qos_add_request
__traceiter_android_vh_freq_qos_remove_request
__traceiter_android_vh_freq_qos_update_request
__traceiter_android_vh_gzvm_destroy_vm_post_process
__traceiter_android_vh_gzvm_handle_demand_page_post
__traceiter_android_vh_gzvm_handle_demand_page_pre
__traceiter_android_vh_gzvm_vcpu_exit_reason
__traceiter_android_vh_iommu_iovad_alloc_iova
__traceiter_android_vh_iommu_iovad_free_iova
__traceiter_android_vh_is_fpsimd_save
@@ -3159,6 +3164,10 @@
__tracepoint_android_vh_freq_qos_add_request
__tracepoint_android_vh_freq_qos_remove_request
__tracepoint_android_vh_freq_qos_update_request
__tracepoint_android_vh_gzvm_destroy_vm_post_process
__tracepoint_android_vh_gzvm_handle_demand_page_post
__tracepoint_android_vh_gzvm_handle_demand_page_pre
__tracepoint_android_vh_gzvm_vcpu_exit_reason
__tracepoint_android_vh_iommu_iovad_alloc_iova
__tracepoint_android_vh_iommu_iovad_free_iova
__tracepoint_android_vh_is_fpsimd_save

View File

@@ -1,5 +1,4 @@
[abi_symbol_list]
access_process_vm
add_timer
add_uevent_var
add_wait_queue
@@ -12,16 +11,17 @@
__alloc_skb
alloc_workqueue
alt_cb_patch_nops
android_rvh_probe_register
anon_inode_getfd
anon_inode_getfile
arc4_crypt
arc4_setkey
__arch_copy_from_user
__arch_copy_to_user
arch_timer_read_counter
arm64_use_ng_mappings
__arm_smccc_hvc
__arm_smccc_smc
atomic_notifier_call_chain
atomic_notifier_chain_register
atomic_notifier_chain_unregister
autoremove_wake_function
@@ -78,6 +78,7 @@
bpf_trace_run8
__bread_gfp
__brelse
bsearch
bt_err
bt_info
bt_procfs_cleanup
@@ -95,6 +96,7 @@
bus_unregister
bus_unregister_notifier
cache_line_size
caches_clean_inval_pou
call_rcu
cancel_delayed_work
cancel_delayed_work_sync
@@ -176,17 +178,18 @@
component_unbind_all
console_printk
console_suspend_enabled
console_verbose
__const_udelay
consume_skb
contig_page_data
copy_from_iter_toio
copy_from_kernel_nofault
copy_from_user_nofault
__copy_overflow
copy_page
copy_page_from_iter_atomic
copy_to_iter_fromio
cpu_bit_bitmap
cpufreq_boost_enabled
cpufreq_cpu_get_raw
cpufreq_dbs_governor_exit
cpufreq_dbs_governor_init
@@ -195,20 +198,19 @@
cpufreq_dbs_governor_stop
__cpufreq_driver_target
cpufreq_freq_attr_scaling_available_freqs
cpufreq_freq_attr_scaling_boost_freqs
cpufreq_generic_frequency_table_verify
cpufreq_register_driver
cpufreq_register_governor
cpufreq_table_index_unsorted
cpufreq_unregister_governor
cpufreq_update_policy
cpu_hotplug_disable
cpu_hotplug_enable
__cpuhp_remove_state
__cpuhp_setup_state
__cpuhp_state_add_instance
__cpuhp_state_remove_instance
__cpu_online_mask
cpu_pm_register_notifier
cpu_pm_unregister_notifier
__cpu_possible_mask
__cpu_present_mask
cpus_read_lock
@@ -427,7 +429,6 @@
dma_buf_vmap_unlocked
dma_buf_vunmap
dma_buf_vunmap_unlocked
dma_contiguous_default_area
dma_fence_add_callback
dma_fence_array_create
dma_fence_array_ops
@@ -666,7 +667,6 @@
find_get_pid
_find_next_bit
_find_next_zero_bit
find_pid_ns
find_task_by_vpid
find_vma
find_vma_intersection
@@ -685,7 +685,6 @@
folio_wait_bit
follow_pfnmap_end
follow_pfnmap_start
font_vga_8x16
for_each_kernel_tracepoint
fput
frame_vector_to_pages
@@ -708,6 +707,7 @@
fs_param_is_uid
__fs_parse
fs_umode_to_dtype
ftrace_dump
fwnode_property_present
fwnode_property_read_string
fwnode_property_read_u32_array
@@ -743,22 +743,20 @@
gen_pool_virt_to_phys
get_cpu_device
get_cpu_idle_time_us
get_current_tty
get_device
get_file_rcu
get_free_pages_noprof
get_net_ns_by_fd
get_net_ns_by_pid
get_pid_task
get_random_bytes
get_random_u16
get_random_u32
__get_task_comm
get_task_mm
get_tree_bdev
get_unused_fd_flags
get_user_pages
get_user_pages_fast
get_user_pages_remote
gov_update_cpu_data
gpiochip_add_data_with_key
gpiochip_generic_free
@@ -879,6 +877,7 @@
input_set_capability
input_unregister_device
input_unregister_handle
input_unregister_handler
insert_inode_locked
invalidate_bdev
invalidate_bh_lrus
@@ -896,12 +895,11 @@
iommu_group_ref_get
ioremap_prot
iounmap
iov_iter_extract_pages
iov_iter_advance
iov_iter_kvec
iov_iter_npages
iov_iter_revert
iov_iter_zero
ipi_desc_get
iput
__irq_apply_affinity_hint
irq_check_status_bit
@@ -921,12 +919,17 @@
irq_get_irq_data
irq_modify_status
irq_of_parse_and_map
__irq_regs
__irq_resolve_mapping
__irq_set_handler
irq_set_irq_wake
irq_to_desc
irq_work_queue
is_bad_inode
is_console_locked
is_vmalloc_addr
is_vmalloc_or_module_addr
iterate_fd
iter_file_splice_write
jiffies
jiffies_64_to_clock_t
@@ -935,6 +938,7 @@
jiffies_to_usecs
kasan_flag_enabled
kasprintf
kernel_cpustat
kernel_kobj
kernel_param_lock
kernel_param_unlock
@@ -945,6 +949,7 @@
kern_path
key_put
keyring_alloc
__kfence_pool
__kfifo_alloc
__kfifo_free
__kfifo_from_user
@@ -955,6 +960,7 @@
__kfifo_out_r
__kfifo_to_user
kfree
kfree_const
kfree_link
kfree_sensitive
kfree_skb_list_reason
@@ -969,20 +975,23 @@
__kmem_cache_create_args
kmem_cache_destroy
kmem_cache_free
kmem_cache_shrink
kmemdup_array
kmemdup_noprof
kobject_add
kmsg_dump_get_line
kmsg_dump_register
kmsg_dump_rewind
kmsg_dump_unregister
kobject_create_and_add
kobject_del
kobject_init
kobject_init_and_add
kobject_put
kobject_uevent
kobject_uevent_env
kobj_sysfs_ops
krealloc_noprof
kstrdup
kstrdup_quotable_cmdline
kstrndup
kstrtobool
kstrtobool_from_user
kstrtoint
@@ -1054,7 +1063,6 @@
make_bad_inode
mark_buffer_dirty
__mark_inode_dirty
mas_empty_area_rev
mas_find
match_string
mbox_chan_received_data
@@ -1083,7 +1091,6 @@
__memcpy_toio
memdup_user
memmove
memory_read_from_buffer
memparse
memremap
mem_section
@@ -1118,7 +1125,6 @@
mmc_wait_for_cmd
mmc_wait_for_req
__mmdrop
mmput
mod_delayed_work_on
mod_node_page_state
mod_timer
@@ -1132,7 +1138,6 @@
__msecs_to_jiffies
msleep
msleep_interruptible
mtree_load
__mutex_init
mutex_is_locked
mutex_lock
@@ -1184,7 +1189,6 @@
nonseekable_open
noop_llseek
nr_cpu_ids
nr_ipi_get
nr_irqs
nr_swap_pages
nsecs_to_jiffies
@@ -1194,7 +1198,6 @@
of_address_to_resource
of_clk_add_provider
of_clk_get
of_clk_get_by_name
of_clk_get_from_provider
of_clk_src_onecell_get
of_count_phandle_with_args
@@ -1250,7 +1253,6 @@
of_reserved_mem_device_release
of_reserved_mem_lookup
of_translate_address
oops_in_progress
overflowgid
overflowuid
pagecache_get_page
@@ -1258,26 +1260,25 @@
__page_pinner_put_page
panic
panic_notifier_list
panic_timeout
param_array_ops
param_get_int
param_get_ulong
param_ops_bool
param_ops_byte
param_ops_charp
param_ops_int
param_ops_long
param_ops_short
param_ops_string
param_ops_uint
param_ops_ullong
param_ops_ulong
param_ops_ushort
param_set_ulong
path_put
pcpu_alloc_noprof
percpu_counter_add_batch
percpu_counter_batch
__per_cpu_offset
perf_event_enable
perf_event_sysfs_show
perf_pmu_migrate_context
perf_pmu_register
@@ -1313,6 +1314,7 @@
pinmux_generic_get_function_groups
pinmux_generic_get_function_name
pinmux_generic_remove_function
pin_user_pages_fast
pin_user_pages_remote
platform_bus_type
platform_device_add
@@ -1349,7 +1351,6 @@
__pm_runtime_use_autosuspend
__pm_stay_awake
pm_stay_awake
pm_system_wakeup
pm_wakeup_dev_event
pm_wakeup_ws_event
pm_wq
@@ -1372,6 +1373,7 @@
proc_create_data
proc_create_single_data
proc_mkdir
proc_mkdir_data
proc_remove
proc_set_user
proto_register
@@ -1393,11 +1395,16 @@
pwmchip_remove
queue_delayed_work_on
queue_work_on
radix_tree_delete
radix_tree_insert
radix_tree_lookup
radix_tree_next_chunk
___ratelimit
_raw_read_lock
_raw_read_lock_irqsave
_raw_read_trylock
_raw_read_unlock
_raw_read_unlock_irqrestore
_raw_spin_lock
_raw_spin_lock_bh
_raw_spin_lock_irq
@@ -1415,9 +1422,11 @@
_raw_write_unlock_irqrestore
rb_erase
rb_first
rb_first_postorder
rb_insert_color
rb_last
rb_next
rb_next_postorder
rb_prev
rb_replace_node
rcu_barrier
@@ -1451,6 +1460,7 @@
register_syscore_ops
__register_virtio_driver
register_vmap_purge_notifier
register_wide_hw_breakpoint
regmap_bulk_read
regmap_bulk_write
regmap_exit
@@ -1516,12 +1526,10 @@
rproc_add_carveout
rproc_alloc
rproc_boot
rproc_da_to_va
rproc_del
rproc_free
rproc_mem_entry_init
rproc_of_resm_mem_entry_init
rproc_report_crash
rproc_shutdown
rproc_vq_interrupt
rtc_add_group
@@ -1532,7 +1540,6 @@
rtnl_is_locked
rtnl_lock
rtnl_unlock
runqueues
sampling_rate_store
sb_set_blocksize
sched_clock
@@ -1541,13 +1548,14 @@
sched_set_normal
sched_setscheduler
sched_setscheduler_nocheck
sched_show_task
schedule
schedule_hrtimeout
schedule_timeout
scnprintf
send_sig
seq_hex_dump
seq_list_next
seq_list_start
seq_lseek
seq_open
seq_open_private
@@ -1583,6 +1591,7 @@
set_page_dirty_lock
__SetPageMovable
set_user_nice
set_worker_desc
sg_alloc_table
sg_alloc_table_from_pages_segment
sg_free_table
@@ -1599,6 +1608,7 @@
shrinker_alloc
shrinker_free
shrinker_register
si_meminfo
simple_attr_open
simple_attr_release
simple_open
@@ -1635,6 +1645,7 @@
skip_spaces
sk_skb_reason_drop
smp_call_function_single_async
smp_call_on_cpu
snd_card_free
snd_card_new
snd_card_ref
@@ -1669,8 +1680,6 @@
snd_pcm_set_ops
snd_pcm_stop
snd_pcm_stop_xrun
_snd_pcm_stream_lock_irqsave
snd_pcm_stream_unlock_irqrestore
snd_soc_bytes_info_ext
snd_soc_bytes_tlv_callback
snd_soc_info_enum_double
@@ -1698,23 +1707,28 @@
sock_no_shutdown
sock_no_socketpair
sock_release
sort
__spi_alloc_controller
spi_bus_type
spi_finalize_current_message
spi_new_device
spi_register_controller
__spi_register_driver
spi_setup
spi_sync
spi_target_abort
split_page
sprintf
sprint_symbol_no_offset
srcu_init_notifier_head
srcu_notifier_call_chain
srcu_notifier_chain_register
srcu_notifier_chain_unregister
sscanf
__stack_chk_fail
stack_depot_fetch
stack_trace_save
stack_trace_save_regs
stack_trace_save_tsk
static_key_disable
static_key_enable
static_key_slow_dec
@@ -1786,8 +1800,6 @@
system_power_efficient_wq
system_unbound_wq
system_wq
sys_tz
task_active_pid_ns
__tasklet_hi_schedule
tasklet_init
tasklet_kill
@@ -1804,6 +1816,7 @@
timer_delete
timer_delete_sync
_totalram_pages
touch_softlockup_watchdog
trace_array_destroy
trace_array_get_by_name
trace_array_init_printk
@@ -1816,24 +1829,50 @@
trace_event_raw_init
trace_event_reg
trace_handle_return
__traceiter_android_rvh_schedule_bug
__traceiter_android_vh_meminfo_proc_show
__traceiter_android_vh_oom_check_panic
__traceiter_android_vh_sysrq_crash
__traceiter_android_vh_try_to_freeze_todo_unfrozen
__traceiter_device_pm_callback_end
__traceiter_device_pm_callback_start
__traceiter_gpu_mem_total
__traceiter_kfree
__traceiter_kmalloc
__traceiter_kmem_cache_alloc
__traceiter_kmem_cache_free
__traceiter_mmap_lock_acquire_returned
__traceiter_mmap_lock_released
__traceiter_mmap_lock_start_locking
__traceiter_suspend_resume
trace_output_call
__tracepoint_android_rvh_schedule_bug
__tracepoint_android_vh_meminfo_proc_show
__tracepoint_android_vh_oom_check_panic
__tracepoint_android_vh_sysrq_crash
__tracepoint_android_vh_try_to_freeze_todo_unfrozen
__tracepoint_device_pm_callback_end
__tracepoint_device_pm_callback_start
__tracepoint_gpu_mem_total
__tracepoint_kfree
__tracepoint_kmalloc
__tracepoint_kmem_cache_alloc
__tracepoint_kmem_cache_free
__tracepoint_mmap_lock_acquire_returned
__tracepoint_mmap_lock_released
__tracepoint_mmap_lock_start_locking
tracepoint_probe_register
tracepoint_probe_unregister
tracepoint_srcu
__tracepoint_suspend_resume
trace_print_array_seq
trace_print_flags_seq
trace_print_symbols_seq
__trace_puts
trace_raw_output_prep
trace_set_clr_event
__trace_trigger_soft_disabled
tracing_off
truncate_inode_pages_final
truncate_pagecache
truncate_setsize
@@ -1876,6 +1915,7 @@
unregister_filesystem
unregister_inet6addr_notifier
unregister_inetaddr_notifier
unregister_kprobe
unregister_kretprobe
unregister_module_notifier
unregister_netdev
@@ -1886,8 +1926,10 @@
unregister_pm_notifier
unregister_reboot_notifier
unregister_rpmsg_driver
unregister_syscore_ops
unregister_virtio_driver
unregister_vmap_purge_notifier
unregister_wide_hw_breakpoint
up
update_devfreq
up_read
@@ -2090,11 +2132,12 @@
vm_get_page_prot
vm_map_pages
vm_map_ram
vm_node_stat
vm_unmapped_area
vm_unmap_ram
vm_zone_stat
vscnprintf
vsnprintf
vsprintf
vunmap
vzalloc_noprof
wait_for_completion
@@ -2112,6 +2155,8 @@
wakeup_source_register
wakeup_source_remove
wakeup_source_unregister
walk_iomem_res_desc
__warn_flushing_systemwide_wq
__warn_printk
watchdog_init_timeout
watchdog_register_device
@@ -2120,11 +2165,14 @@
wireless_nlevent_flush
wireless_send_event
woken_wake_function
work_on_cpu_safe_key
write_cache_pages
write_inode_now
ww_mutex_lock
ww_mutex_unlock
x509_load_certificate_list
xas_load
__xas_next
xhci_add_endpoint
xhci_check_bandwidth
xhci_drop_endpoint

View File

@@ -331,6 +331,7 @@
__traceiter_android_vh_tune_swappiness
__traceiter_android_vh_unreserve_highatomic_bypass
__traceiter_android_vh_vmscan_kswapd_done
__traceiter_android_vh_page_cache_ra_order_bypass
__traceiter_block_bio_queue
__traceiter_block_getrq
__traceiter_block_rq_complete
@@ -470,6 +471,7 @@
__tracepoint_android_vh_tune_swappiness
__tracepoint_android_vh_unreserve_highatomic_bypass
__tracepoint_android_vh_vmscan_kswapd_done
__tracepoint_android_vh_page_cache_ra_order_bypass
__tracepoint_block_bio_queue
__tracepoint_block_getrq
__tracepoint_block_rq_complete
@@ -535,5 +537,7 @@
zstd_init_cctx
zstd_init_dctx
zstd_is_error
is_ashmem_file
ashmem_area_name
ashmem_area_size
ashmem_area_vmfile

View File

@@ -2531,6 +2531,7 @@
trace_event_reg
trace_handle_return
__traceiter_android_rvh_attach_entity_load_avg
__traceiter_android_rvh_build_perf_domains
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_cgroup_force_kthread_migration
__traceiter_android_rvh_check_preempt_wakeup_fair
@@ -2652,6 +2653,7 @@
__traceiter_workqueue_execute_start
trace_output_call
__tracepoint_android_rvh_attach_entity_load_avg
__tracepoint_android_rvh_build_perf_domains
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_cgroup_force_kthread_migration
__tracepoint_android_rvh_check_preempt_wakeup_fair
@@ -2979,6 +2981,7 @@
vb2_streamon
vb2_vmalloc_memops
vb2_wait_for_all_buffers
vendor_data_pad
verify_pkcs7_signature
vfree
video_devdata

View File

@@ -1259,6 +1259,9 @@
get_user_ifreq
get_user_pages
get_zeroed_page_noprof
gic_v3_cpu_init
gic_v3_dist_init
gic_v3_dist_wait_for_rwp
gov_attr_set_init
gov_attr_set_put
governor_sysfs_ops

View File

@@ -1367,6 +1367,7 @@
rpmsg_register_device
rpmsg_register_device_override
rproc_get_by_phandle
rpmsg_chrdev_eptdev_create
# required by rtc-rtk.ko
mktime64

View File

@@ -31,6 +31,10 @@
__traceiter_android_vh_do_shrink_slab
__traceiter_android_vh_shrink_slab_bypass
# required by health_report module
__tracepoint_android_vh_health_report
__traceiter_android_vh_health_report
# required by mi_mem_engine.ko
__traceiter_android_vh_tune_swappiness
__tracepoint_android_vh_tune_swappiness
@@ -143,8 +147,13 @@
#required by mi_sched.ko
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_free_user
__traceiter_android_vh_alloc_uid
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_free_user
__tracepoint_android_vh_alloc_uid
free_uid
find_user
#required by mi_freqwdg.ko
__traceiter_android_vh_freq_qos_remove_request
@@ -171,6 +180,10 @@
blk_mq_sched_try_insert_merge
elv_rqhash_add
# required by mi_damon.ko
folio_deactivate
folio_mark_accessed
#required by cifs.ko
add_swap_extent
asn1_ber_decoder
@@ -382,6 +395,20 @@
__tracepoint_android_vh_psi_event
__tracepoint_android_vh_psi_group
#required by io_monitor.ko
__tracepoint_f2fs_gc_begin
__tracepoint_f2fs_gc_end
__tracepoint_f2fs_write_checkpoint
__tracepoint_f2fs_sync_file_enter
__tracepoint_f2fs_sync_file_exit
__tracepoint_ufshcd_command
__traceiter_f2fs_gc_begin
__traceiter_f2fs_gc_end
__traceiter_f2fs_write_checkpoint
__traceiter_f2fs_sync_file_enter
__traceiter_f2fs_sync_file_exit
__traceiter_ufshcd_command
#required by zram.ko module
__blk_alloc_disk
bio_end_io_acct_remapped

View File

@@ -392,7 +392,6 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
#define vgic_ready(k) ((k)->arch.vgic.ready)
#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))

View File

@@ -211,6 +211,7 @@ struct kvm_io_range {
struct kvm_io_bus {
int dev_count;
int ioeventfd_count;
struct rcu_head rcu;
struct kvm_io_range range[];
};

View File

@@ -1261,8 +1261,8 @@ struct task_struct {
enum blocked_on_state blocked_on_state;
struct mutex *blocked_on; /* lock we're blocked on */
struct task_struct *blocked_donor; /* task that is boosting this task */
#ifdef CONFIG_SCHED_PROXY_EXEC
struct list_head migration_node;
#ifdef CONFIG_SCHED_PROXY_EXEC
struct list_head blocked_head; /* tasks blocked on this task */
struct list_head blocked_node; /* our entry on someone elses blocked_head */
/* Node for list of tasks to process blocked_head list for blocked entitiy activations */
@@ -2198,6 +2198,18 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
__cond_resched_rwlock_write(lock); \
})
static inline void __force_blocked_on_runnable(struct task_struct *p)
{
lockdep_assert_held(&p->blocked_lock);
p->blocked_on_state = BO_RUNNABLE;
}
static inline void force_blocked_on_runnable(struct task_struct *p)
{
guard(raw_spinlock_irqsave)(&p->blocked_lock);
__force_blocked_on_runnable(p);
}
static inline void __set_blocked_on_runnable(struct task_struct *p)
{
lockdep_assert_held(&p->blocked_lock);
@@ -2208,17 +2220,14 @@ static inline void __set_blocked_on_runnable(struct task_struct *p)
static inline void set_blocked_on_runnable(struct task_struct *p)
{
unsigned long flags;
if (!sched_proxy_exec())
return;
raw_spin_lock_irqsave(&p->blocked_lock, flags);
guard(raw_spinlock_irqsave)(&p->blocked_lock);
__set_blocked_on_runnable(p);
raw_spin_unlock_irqrestore(&p->blocked_lock, flags);
}
static inline void set_blocked_on_waking(struct task_struct *p)
static inline void __set_blocked_on_waking(struct task_struct *p)
{
lockdep_assert_held(&p->blocked_lock);
@@ -2226,25 +2235,37 @@ static inline void set_blocked_on_waking(struct task_struct *p)
p->blocked_on_state = BO_WAKING;
}
static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
lockdep_assert_held(&p->blocked_lock);
WARN_ON_ONCE(!m);
/* The task should only be setting itself as blocked */
WARN_ON_ONCE(p != current);
/* Currently we serialize blocked_on under the task::blocked_lock */
lockdep_assert_held_once(&p->blocked_lock);
/*
* Check we are clearing values to NULL or setting NULL
* to values to ensure we don't overwrite existing mutex
* values or clear already cleared values
* Check ensure we don't overwrite existing mutex value
* with a different mutex.
*/
WARN_ON((!m && !p->blocked_on) || (m && p->blocked_on));
WARN_ON_ONCE(p->blocked_on);
p->blocked_on = m;
p->blocked_on_state = m ? BO_BLOCKED : BO_RUNNABLE;
p->blocked_on_state = BO_BLOCKED;
}
static inline struct mutex *get_task_blocked_on(struct task_struct *p)
static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
{
lockdep_assert_held(&p->blocked_lock);
/* The task should only be clearing itself */
WARN_ON_ONCE(p != current);
/* Currently we serialize blocked_on under the task::blocked_lock */
lockdep_assert_held_once(&p->blocked_lock);
/* Make sure we are clearing the relationship with the right lock */
WARN_ON_ONCE(p->blocked_on != m);
p->blocked_on = NULL;
p->blocked_on_state = BO_RUNNABLE;
}
static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
lockdep_assert_held_once(&p->blocked_lock);
return p->blocked_on;
}

View File

@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gzvm
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_GZVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_GZVM_H
#include <trace/hooks/vendor_hooks.h>
struct gzvm_vcpu;
struct gzvm;
DECLARE_HOOK(android_vh_gzvm_vcpu_exit_reason,
TP_PROTO(struct gzvm_vcpu *vcpu, bool *userspace),
TP_ARGS(vcpu, userspace));
DECLARE_HOOK(android_vh_gzvm_handle_demand_page_pre,
TP_PROTO(struct gzvm *vm, int memslot_id, u64 pfn, u64 gfn, u32 nr_entries),
TP_ARGS(vm, memslot_id, pfn, gfn, nr_entries));
DECLARE_HOOK(android_vh_gzvm_handle_demand_page_post,
TP_PROTO(struct gzvm *vm, int memslot_id, u64 pfn, u64 gfn, u32 nr_entries),
TP_ARGS(vm, memslot_id, pfn, gfn, nr_entries));
DECLARE_HOOK(android_vh_gzvm_destroy_vm_post_process,
TP_PROTO(struct gzvm *vm),
TP_ARGS(vm));
#endif /* _TRACE_HOOK_GZVM_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@@ -376,6 +376,10 @@ DECLARE_HOOK(android_vh_page_cache_readahead_start,
DECLARE_HOOK(android_vh_page_cache_readahead_end,
TP_PROTO(struct file *file, pgoff_t pgoff),
TP_ARGS(file, pgoff));
DECLARE_HOOK(android_vh_page_cache_ra_order_bypass,
TP_PROTO(struct readahead_control *ractl, struct file_ra_state *ra,
int new_order, gfp_t *gfp, bool *bypass),
TP_ARGS(ractl, ra, new_order, gfp, bypass));
DECLARE_HOOK(android_vh_filemap_fault_start,
TP_PROTO(struct file *file, pgoff_t pgoff),
TP_ARGS(file, pgoff));

29
include/trace/hooks/usb.h Normal file
View File

@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM usb
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_USB_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_USB_H
#include <trace/hooks/vendor_hooks.h>
struct usb_device;
/*
* Following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
DECLARE_RESTRICTED_HOOK(android_rvh_usb_dev_suspend,
TP_PROTO(struct usb_device *udev, pm_message_t msg, bool *bypass),
TP_ARGS(udev, msg, bypass), 1);
DECLARE_HOOK(android_vh_usb_dev_resume,
TP_PROTO(struct usb_device *udev, pm_message_t msg, bool *bypass),
TP_ARGS(udev, msg, bypass));
#endif /* _TRACE_HOOK_USB_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM user
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_USER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_USER_H
#include <trace/hooks/vendor_hooks.h>
struct user_struct;
DECLARE_HOOK(android_vh_alloc_uid,
TP_PROTO(struct user_struct *user),
TP_ARGS(user));
DECLARE_HOOK(android_vh_free_user,
TP_PROTO(struct user_struct *up),
TP_ARGS(up));
#endif /* _TRACE_HOOK_USER_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@@ -706,6 +706,9 @@ enum ufshcd_quirks {
enum ufshcd_android_quirks {
/* Set IID to one. */
UFSHCD_ANDROID_QUIRK_SET_IID_TO_ONE = 1 << 0,
/* Do not read IS after H8 enter */
UFSHCD_ANDROID_QUIRK_NO_IS_READ_ON_H8 = 1 << 1,
};
enum ufshcd_caps {

View File

@@ -172,6 +172,15 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
#ifdef CONFIG_CPUSETS
.mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
&init_task.alloc_lock),
#endif
.blocked_on_state = BO_RUNNABLE,
.blocked_donor = NULL,
.migration_node = LIST_HEAD_INIT(init_task.migration_node),
#ifdef CONFIG_SCHED_PROXY_EXEC
.blocked_head = LIST_HEAD_INIT(init_task.blocked_head),
.blocked_node = LIST_HEAD_INIT(init_task.blocked_node),
.blocked_activation_node = LIST_HEAD_INIT(init_task.blocked_activation_node),
.sleeping_owner = NULL,
#endif
#ifdef CONFIG_RT_MUTEXES
.pi_waiters = RB_ROOT_CACHED,

View File

@@ -2356,8 +2356,8 @@ __latent_entropy struct task_struct *copy_process(
p->blocked_on_state = BO_RUNNABLE;
p->blocked_on = NULL; /* not blocked yet */
p->blocked_donor = NULL; /* nobody is boosting p yet */
#ifdef CONFIG_SCHED_PROXY_EXEC
INIT_LIST_HEAD(&p->migration_node);
#ifdef CONFIG_SCHED_PROXY_EXEC
INIT_LIST_HEAD(&p->blocked_head);
INIT_LIST_HEAD(&p->blocked_node);
INIT_LIST_HEAD(&p->blocked_activation_node);

View File

@@ -705,8 +705,10 @@ int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time
ret = __futex_wait(uaddr, flags, val, to, bitset);
/* No timeout, nothing to clean up. */
if (!to)
if (!to) {
trace_android_vh_futex_wait_end(flags, bitset);
return ret;
}
hrtimer_cancel(&to->timer);
destroy_hrtimer_on_stack(&to->timer);
@@ -719,6 +721,7 @@ int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time
restart->futex.bitset = bitset;
restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
trace_android_vh_futex_wait_end(flags, bitset);
return set_restart_fn(restart, futex_wait_restart);
}

View File

@@ -54,13 +54,13 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
lockdep_assert_held(&lock->wait_lock);
/* Current thread can't be already blocked (since it's executing!) */
DEBUG_LOCKS_WARN_ON(get_task_blocked_on(task));
DEBUG_LOCKS_WARN_ON(__get_task_blocked_on(task));
}
void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
struct mutex *blocked_on = get_task_blocked_on(task);
struct mutex *blocked_on = __get_task_blocked_on(task);
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
DEBUG_LOCKS_WARN_ON(waiter->task != task);

View File

@@ -652,7 +652,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
}
trace_android_vh_mutex_wait_start(lock);
set_task_blocked_on(current, lock);
__set_task_blocked_on(current, lock);
set_current_state(state);
trace_contention_begin(lock, LCB_F_MUTEX);
for (;;) {
@@ -713,8 +713,10 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
bool opt_acquired;
/*
* mutex_optimistic_spin() can schedule, so we need to
* release these locks before calling it.
* mutex_optimistic_spin() can call schedule(), so
* we need to release these locks before calling it,
* and clear blocked on so we don't become unselectable
* to run.
*/
current->blocked_on_state = BO_RUNNABLE;
raw_spin_unlock(&current->blocked_lock);
@@ -729,7 +731,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
trace_contention_begin(lock, LCB_F_MUTEX);
}
}
set_task_blocked_on(current, NULL);
__clear_task_blocked_on(current, lock);
__set_current_state(TASK_RUNNING);
trace_android_vh_mutex_wait_finish(lock);
@@ -763,12 +765,12 @@ skip_wait:
return 0;
err:
set_task_blocked_on(current, NULL);
__clear_task_blocked_on(current, lock);
__set_current_state(TASK_RUNNING);
trace_android_vh_mutex_wait_finish(lock);
__mutex_remove_waiter(lock, &waiter);
err_early_kill:
WARN_ON(get_task_blocked_on(current));
WARN_ON(__get_task_blocked_on(current));
trace_contention_end(lock, ret);
raw_spin_unlock(&current->blocked_lock);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
@@ -990,10 +992,10 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
struct mutex *next_lock;
raw_spin_lock_nested(&donor->blocked_lock, SINGLE_DEPTH_NESTING);
next_lock = get_task_blocked_on(donor);
next_lock = __get_task_blocked_on(donor);
if (next_lock == lock) {
next = donor;
set_blocked_on_waking(donor);
__set_blocked_on_waking(donor);
wake_q_add(&wake_q, donor);
current->blocked_donor = NULL;
}
@@ -1014,10 +1016,10 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
raw_spin_lock_nested(&next->blocked_lock, SINGLE_DEPTH_NESTING);
debug_mutex_wake_waiter(lock, waiter);
WARN_ON(get_task_blocked_on(next) != lock);
set_blocked_on_waking(next);
wake_q_add(&wake_q, next);
WARN_ON_ONCE(__get_task_blocked_on(next) != lock);
__set_blocked_on_waking(next);
raw_spin_unlock(&next->blocked_lock);
wake_q_add(&wake_q, next);
}
if (owner & MUTEX_FLAG_HANDOFF)

View File

@@ -281,21 +281,20 @@ __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
return false;
if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
/* nested as we should hold current->blocked_lock already */
raw_spin_lock_nested(&waiter->task->blocked_lock, SINGLE_DEPTH_NESTING);
#ifndef WW_RT
debug_mutex_wake_waiter(lock, waiter);
#endif
/* nested as we should hold current->blocked_lock already */
raw_spin_lock_nested(&waiter->task->blocked_lock, SINGLE_DEPTH_NESTING);
/*
* When waking up the task to die, be sure to set the
* blocked_on_state to WAKING. Otherwise we can see
* circular blocked_on relationships that can't
* resolve.
* blocked_on_state to BO_WAKING. Otherwise we can see
* circular blocked_on relationships that can't resolve.
*/
WARN_ON(get_task_blocked_on(waiter->task) != lock);
#endif
set_blocked_on_waking(waiter->task);
wake_q_add(wake_q, waiter->task);
WARN_ON_ONCE(__get_task_blocked_on(waiter->task) != lock);
__set_blocked_on_waking(waiter->task);
raw_spin_unlock(&waiter->task->blocked_lock);
wake_q_add(wake_q, waiter->task);
}
return true;
@@ -347,12 +346,12 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
raw_spin_lock_nested(&owner->blocked_lock, SINGLE_DEPTH_NESTING);
/*
* When waking up the task to wound, be sure to set the
* blocked_on_state flag. Otherwise we can see circular
* blocked_on relationships that can't resolve.
* blocked_on_state to BO_WAKING. Otherwise we can see
* circular blocked_on relationships that can't resolve.
*/
set_blocked_on_waking(owner);
wake_q_add(wake_q, owner);
__set_blocked_on_waking(owner);
raw_spin_unlock(&owner->blocked_lock);
wake_q_add(wake_q, owner);
}
return true;
}

View File

@@ -102,13 +102,15 @@ static int try_to_freeze_tasks(bool user_only)
for_each_process_thread(g, p) {
if (p != current && freezing(p) && !frozen(p)) {
sched_show_task(p);
trace_android_vh_try_to_freeze_todo_unfrozen(p);
if (!wakeup)
trace_android_vh_try_to_freeze_todo_unfrozen(p);
}
}
read_unlock(&tasklist_lock);
}
trace_android_vh_try_to_freeze_todo(todo, elapsed_msecs, wq_busy);
if (!wakeup)
trace_android_vh_try_to_freeze_todo(todo, elapsed_msecs, wq_busy);
} else {
pr_info("Freezing %s completed (elapsed %d.%03d seconds)\n",
what, elapsed_msecs / 1000, elapsed_msecs % 1000);

View File

@@ -2141,7 +2141,7 @@ inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
return dequeue_task_result;
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
static inline void __activate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (task_on_rq_migrating(p))
flags |= ENQUEUE_MIGRATED;
@@ -2155,6 +2155,61 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
}
EXPORT_SYMBOL_GPL(activate_task);
#ifdef CONFIG_SCHED_PROXY_EXEC
static inline
void __proxy_remove_from_sleeping_owner(struct task_struct *owner, struct task_struct *p)
{
lockdep_assert_held(&owner->blocked_lock);
if (p->sleeping_owner == owner) {
list_del_init(&p->blocked_node);
WRITE_ONCE(p->sleeping_owner, NULL);
put_task_struct(owner); // matches get in proxy_enqueue_on_owner
}
}
static inline void proxy_remove_from_sleeping_owner(struct task_struct *p)
{
struct task_struct *owner = READ_ONCE(p->sleeping_owner);
if (owner) {
raw_spin_lock(&owner->blocked_lock);
__proxy_remove_from_sleeping_owner(owner, p);
raw_spin_unlock(&owner->blocked_lock);
}
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
if (!sched_proxy_exec()) {
__activate_task(rq, p, flags);
return;
}
lockdep_assert_rq_held(rq);
proxy_remove_from_sleeping_owner(p);
/*
* By calling __activate_task() with blocked_lock held, we
* order against the find_proxy_task() blocked_task case
* such that no more blocked tasks will be enqueued on p
* once we release p->blocked_lock.
*/
raw_spin_lock(&p->blocked_lock);
WARN_ON(task_cpu(p) != cpu_of(rq));
__activate_task(rq, p, flags);
raw_spin_unlock(&p->blocked_lock);
}
#else
static inline void proxy_remove_from_sleeping_owner(struct task_struct *p)
{
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
__activate_task(rq, p, flags);
}
#endif
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
@@ -3794,68 +3849,14 @@ static inline void ttwu_do_wakeup(struct task_struct *p)
}
#ifdef CONFIG_SCHED_PROXY_EXEC
static inline
void __proxy_remove_from_sleeping_owner(struct task_struct *owner, struct task_struct *p)
{
lockdep_assert_held(&owner->blocked_lock);
if (p->sleeping_owner == owner) {
list_del_init(&p->blocked_node);
WRITE_ONCE(p->sleeping_owner, NULL);
put_task_struct(owner); // matches get in proxy_enqueue_on_owner
}
}
static inline void proxy_remove_from_sleeping_owner(struct task_struct *p)
{
struct task_struct *owner = READ_ONCE(p->sleeping_owner);
if (owner) {
raw_spin_lock(&owner->blocked_lock);
__proxy_remove_from_sleeping_owner(owner, p);
raw_spin_unlock(&owner->blocked_lock);
}
}
static void do_activate_task(struct rq *rq, struct task_struct *p, int en_flags)
{
if (!sched_proxy_exec()) {
activate_task(rq, p, en_flags);
return;
}
lockdep_assert_rq_held(rq);
proxy_remove_from_sleeping_owner(p);
/*
* By calling activate_task with blocked_lock held, we
* order against the find_proxy_task() blocked_task case
* such that no more blocked tasks will be enqueued on p
* once we release p->blocked_lock.
*/
raw_spin_lock(&p->blocked_lock);
WARN_ON(task_cpu(p) != cpu_of(rq));
activate_task(rq, p, en_flags);
raw_spin_unlock(&p->blocked_lock);
}
static bool proxy_task_runnable_but_waking(struct task_struct *p)
{
if (!sched_proxy_exec())
return false;
return (READ_ONCE(p->__state) == TASK_RUNNING &&
READ_ONCE(p->blocked_on_state) == BO_WAKING);
}
#ifdef CONFIG_SMP
static inline void proxy_set_task_cpu(struct task_struct *p, int cpu)
{
unsigned int wake_cpu;
/* Sanity check to make sure we can return safely */
WARN_ON(!is_cpu_allowed(p, p->wake_cpu));
/*
* Since we enqueuing blocked tasks on a cpu it may not
* be able to run on, preserve wake_cpu when we
* Since we are enqueuing a blocked task on a cpu it may
* not be able to run on, preserve wake_cpu when we
* __set_task_cpu so we can return the task to where it
* was previously runnable.
*/
@@ -3869,38 +3870,53 @@ static inline void proxy_set_task_cpu(struct task_struct *p, int cpu)
__set_task_cpu(p, cpu);
}
#endif /* CONFIG_SMP */
static bool proxy_task_runnable_but_waking(struct task_struct *p)
{
if (!sched_proxy_exec())
return false;
return (READ_ONCE(p->__state) == TASK_RUNNING &&
READ_ONCE(p->blocked_on_state) == BO_WAKING);
}
static void do_activate_blocked_waiter(struct rq *target_rq, struct task_struct *p, int en_flags)
{
unsigned long flags;
unsigned int state;
struct rq_flags rf;
int target_cpu = cpu_of(target_rq);
raw_spin_lock_irqsave(&p->pi_lock, flags);
state = READ_ONCE(p->__state);
/* Avoid racing with ttwu */
if (state == TASK_WAKING)
goto out;
scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
state = READ_ONCE(p->__state);
/* Avoid racing with ttwu */
if (state == TASK_WAKING)
return;
if (READ_ONCE(p->on_rq)) {
/*
* We raced with a non mutex handoff activation of p.
* That activation will also take care of activating
* all of the tasks after p in the blocked_head list,
* so we're done here.
*/
goto out;
if (READ_ONCE(p->on_rq)) {
/*
* We raced with a non mutex handoff activation of p.
* That activation will also take care of activating
* all of the tasks after p in the blocked_head list,
* so we're done here.
*/
return;
}
if (task_on_cpu(task_rq(p), p)) {
/*
* Its possible this activation is very late, and
* we already were woken up and are running on a
* different cpu. If that task blocked, it could be
* dequeued (so on_rq == 0), but still on_cpu.
* Bail in this case, as we definitely don't want to
* activate a task when its on_cpu elsewhere.
*/
return;
}
proxy_set_task_cpu(p, target_cpu);
rq_lock_irqsave(target_rq, &rf);
update_rq_clock(target_rq);
activate_task(target_rq, p, en_flags);
resched_curr(target_rq);
rq_unlock_irqrestore(target_rq, &rf);
}
proxy_set_task_cpu(p, target_cpu);
rq_lock_irqsave(target_rq, &rf);
update_rq_clock(target_rq);
do_activate_task(target_rq, p, en_flags);
resched_curr(target_rq);
rq_unlock_irqrestore(target_rq, &rf);
out:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}
static void activate_blocked_waiters(struct rq *target_rq,
@@ -3920,10 +3936,10 @@ static void activate_blocked_waiters(struct rq *target_rq,
en_flags |= ENQUEUE_MIGRATED;
/*
* A whole bunch of 'proxy' tasks back this blocked task, wake
* them all up to give this task its 'fair' share.
*/
/*
* A whole bunch of waiting donor tasks back this blocked
* lock owner task, wake them all up to give this task its
* 'fair' share.
*
* This is a little unique here and the locking is messy.
* At this point we only hold the blocked_lock, so the
* owner task may be able to run and do all sorts of
@@ -4105,16 +4121,6 @@ void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct task_s
}
#endif /* CONFIG_SMP */
#else /* !CONFIG_SCHED_PROXY_EXEC */
static inline void proxy_remove_from_sleeping_owner(struct task_struct *p)
{
}
static inline void do_activate_task(struct rq *rq, struct task_struct *p,
int en_flags)
{
activate_task(rq, p, en_flags);
}
static bool proxy_task_runnable_but_waking(struct task_struct *p)
{
return false;
@@ -4128,6 +4134,13 @@ static inline void activate_blocked_waiters(struct rq *target_rq,
#endif /* CONFIG_SCHED_PROXY_EXEC */
#ifdef CONFIG_SMP
/*
* Checks to see if task p has been proxy-migrated to another rq
* and needs to be returned. If so, we deactivate the task here
* so that it can be properly woken up on the p->wake_cpu
* (or whichever cpu select_task_rq() picks at the bottom of
* try_to_wake_up()
*/
static inline bool proxy_needs_return(struct rq *rq, struct task_struct *p)
{
bool ret = false;
@@ -4136,7 +4149,7 @@ static inline bool proxy_needs_return(struct rq *rq, struct task_struct *p)
return false;
raw_spin_lock(&p->blocked_lock);
if (get_task_blocked_on(p) && p->blocked_on_state == BO_WAKING) {
if (__get_task_blocked_on(p) && p->blocked_on_state == BO_WAKING) {
if (!task_current(rq, p) && (p->wake_cpu != cpu_of(rq))) {
if (task_current_donor(rq, p)) {
put_prev_task(rq, p);
@@ -4161,6 +4174,7 @@ static inline bool proxy_needs_return(struct rq *rq, struct task_struct *p)
{
return false;
}
static inline void _trace_sched_pe_return_migration(struct task_struct *p)
{
}
@@ -4192,7 +4206,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
atomic_dec(&task_rq(p)->nr_iowait);
}
do_activate_task(rq, p, en_flags);
activate_task(rq, p, en_flags);
wakeup_preempt(rq, p, wake_flags);
ttwu_do_wakeup(p);
@@ -4260,6 +4274,10 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
proxy_remove_from_sleeping_owner(p);
enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
}
if (proxy_needs_return(rq, p)) {
_trace_sched_pe_return_migration(p);
goto out;
}
if (!task_on_cpu(rq, p)) {
/*
* When on_rq && !on_cpu the task is preempted, see if
@@ -4267,10 +4285,6 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
*/
wakeup_preempt(rq, p, wake_flags);
}
if (proxy_needs_return(rq, p)) {
_trace_sched_pe_return_migration(p);
goto out;
}
ttwu_do_wakeup(p);
ret = 1;
}
@@ -4411,6 +4425,11 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
if (task_on_scx(p))
return false;
#ifdef CONFIG_SMP
if (p->sched_class == &stop_sched_class)
return false;
#endif
/*
* Do not complicate things with the async wake_list while the CPU is
* in hotplug state.
@@ -5541,6 +5560,7 @@ static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
}
}
#ifdef CONFIG_SCHED_PROXY_EXEC
/*
* Only called from __schedule context
*
@@ -5568,6 +5588,7 @@ static void zap_balance_callbacks(struct rq *rq)
}
rq->balance_callback = found ? &balance_push_callback : NULL;
}
#endif /* CONFIG_SCHED_PROXY_EXEC */
static void balance_push(struct rq *rq);
@@ -5637,9 +5658,11 @@ void balance_callbacks(struct rq *rq, struct balance_callback *head)
#else
#ifdef CONFIG_SCHED_PROXY_EXEC
static inline void zap_balance_callbacks(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_PROXY_EXEC */
static inline void __balance_callbacks(struct rq *rq)
{
@@ -7162,13 +7185,10 @@ static bool try_to_block_task(struct rq *rq, struct task_struct *p,
}
#ifdef CONFIG_SCHED_PROXY_EXEC
static inline struct task_struct *
proxy_resched_idle(struct rq *rq)
static inline struct task_struct *proxy_resched_idle(struct rq *rq)
{
put_prev_task(rq, rq->donor);
put_prev_set_next_task(rq, rq->donor, rq->idle);
rq_set_donor(rq, rq->idle);
set_next_task(rq, rq->idle);
set_tsk_need_resched(rq->idle);
return rq->idle;
}
@@ -7189,11 +7209,10 @@ proxy_resched_idle(struct rq *rq)
static void proxy_migrate_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int target_cpu)
{
struct rq *target_rq = cpu_rq(target_cpu);
LIST_HEAD(migrate_list);
struct rq *target_rq;
lockdep_assert_rq_held(rq);
target_rq = cpu_rq(target_cpu);
/*
* Since we're going to drop @rq, we have to put(@rq->donor) first,
@@ -7216,8 +7235,8 @@ static void proxy_migrate_task(struct rq *rq, struct rq_flags *rf,
/* XXX - Added to address problems with changed dl_server semantics - double check */
__put_prev_set_next_dl_server(rq, rq->donor, rq->curr);
put_prev_task(rq, rq->donor);
rq_set_donor(rq, rq->curr);
set_next_task(rq, rq->curr);
rq_set_donor(rq, rq->idle);
set_next_task(rq, rq->idle);
for (; p; p = p->blocked_donor) {
WARN_ON(p == rq->curr);
@@ -7243,12 +7262,41 @@ static void proxy_migrate_task(struct rq *rq, struct rq_flags *rf,
raw_spin_rq_unlock(target_rq);
raw_spin_rq_lock(rq);
rq_repin_lock(rq, rf);
}
/*
* Ok, now we have the lock again, put rq->curr and
* set_next_task() to idle
*/
proxy_resched_idle(rq);
static void proxy_force_return(struct rq *rq, struct rq_flags *rf,
struct task_struct *p)
{
lockdep_assert_rq_held(rq);
_trace_sched_pe_return_migration(p);
put_prev_task(rq, rq->donor);
rq_set_donor(rq, rq->idle);
set_next_task(rq, rq->idle);
WARN_ON(p == rq->curr);
p->blocked_on_state = BO_WAKING;
get_task_struct(p);
block_task(rq, p, 0);
zap_balance_callbacks(rq);
rq_unpin_lock(rq, rf);
raw_spin_rq_unlock(rq);
wake_up_process(p);
put_task_struct(p);
raw_spin_rq_lock(rq);
rq_repin_lock(rq, rf);
}
static inline bool proxy_can_run_here(struct rq *rq, struct task_struct *p)
{
if (p == rq->curr || p->wake_cpu == cpu_of(rq))
return true;
return false;
}
#else /* !CONFIG_SMP */
static inline
@@ -7256,6 +7304,17 @@ void proxy_migrate_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int target_cpu)
{
}
static inline
void proxy_force_return(struct rq *rq, struct rq_flags *rf,
struct task_struct *p)
{
}
static inline bool proxy_can_run_here(struct rq *rq, struct task_struct *p)
{
return true;
}
#endif /* CONFIG_SMP */
static void proxy_enqueue_on_owner(struct rq *rq, struct task_struct *owner,
@@ -7311,7 +7370,6 @@ static struct task_struct *
find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
{
struct task_struct *owner = NULL;
struct task_struct *ret = NULL;
bool curr_in_chain = false;
int this_cpu = cpu_of(rq);
struct task_struct *p;
@@ -7328,18 +7386,48 @@ find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
* By taking mutex->wait_lock we hold off concurrent mutex_unlock()
* and ensure @owner sticks around.
*/
raw_spin_lock(&mutex->wait_lock);
raw_spin_lock(&p->blocked_lock);
guard(raw_spinlock)(&mutex->wait_lock);
guard(raw_spinlock)(&p->blocked_lock);
/* Check again that p is blocked with blocked_lock held */
if (mutex != get_task_blocked_on(p)) {
if (mutex != __get_task_blocked_on(p)) {
/*
* Something changed in the blocked_on chain and
* we don't know if only at this level. So, let's
* just bail out completely and let __schedule
* just bail out completely and let __schedule()
* figure things out (pick_again loop).
*/
goto out;
return NULL;
}
/* Double check blocked_on_state now we're holding the lock */
if (p->blocked_on_state == BO_RUNNABLE)
return p;
/*
* If a ww_mutex hits the die/wound case, it marks the task as
* BO_WAKING and calls try_to_wake_up(), so that the mutex
* cycle can be broken and we avoid a deadlock.
*
* However, if at that moment, we are here on the cpu which the
* die/wounded task is enqueued, we might loop on the cycle as
* BO_WAKING still causes task_is_blocked() to return true
* (since we want return migration to occur before we run the
* task).
*
* Unfortunately since we hold the rq lock, it will block
* try_to_wake_up from completing and doing the return
* migration.
*
* So when we hit a BO_WAKING task try to wake it up ourselves.
*/
if (p->blocked_on_state == BO_WAKING) {
if (task_current(rq, p)) {
/* If its current just set it runnable */
__force_blocked_on_runnable(p);
return p;
}
goto needs_return;
}
if (task_current(rq, p))
@@ -7348,61 +7436,22 @@ find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
owner = __mutex_owner(mutex);
if (!owner) {
/* If the owner is null, we may have some work to do */
if (!proxy_can_run_here(rq, p))
goto needs_return;
/* First if p is no longer blocked, just return it to run */
if (!task_is_blocked(p)) {
ret = p;
goto out;
}
goto needs_return;
__force_blocked_on_runnable(p);
return p;
}
owner_cpu = task_cpu(owner);
if (owner_cpu != this_cpu) {
trace_sched_pe_migration(donor, owner);
/*
* @owner can disappear, simply migrate to @owner_cpu and leave that CPU
* to sort things out.
*/
raw_spin_unlock(&p->blocked_lock);
raw_spin_unlock(&mutex->wait_lock);
if (curr_in_chain)
return proxy_resched_idle(rq);
proxy_migrate_task(rq, rf, p, owner_cpu);
return NULL;
}
if (task_on_rq_migrating(owner)) {
trace_sched_pe_owner_is_migrating(owner, p);
/*
* One of the chain of mutex owners is currently migrating to this
* CPU, but has not yet been enqueued because we are holding the
* rq lock. As a simple solution, just schedule rq->idle to give
* the migration a chance to complete. Much like the migrate_task
* case we should end up back in find_proxy_task(), this time
* hopefully with all relevant tasks already enqueued.
*/
raw_spin_unlock(&p->blocked_lock);
raw_spin_unlock(&mutex->wait_lock);
return proxy_resched_idle(rq);
}
if (!owner->on_rq || owner->se.sched_delayed) {
if (!READ_ONCE(owner->on_rq) || owner->se.sched_delayed) {
/*
* rq->curr must not be added to the blocked_head list or else
* ttwu_do_activate could enqueue it elsewhere before it switches
* out here. The approach to avoid this is the same as in the
* migrate_task case.
*/
if (curr_in_chain) {
raw_spin_unlock(&p->blocked_lock);
raw_spin_unlock(&mutex->wait_lock);
if (curr_in_chain)
return proxy_resched_idle(rq);
}
/*
* If !@owner->on_rq, holding @rq->lock will not pin the task,
@@ -7412,26 +7461,52 @@ find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
* We use @owner->blocked_lock to serialize against ttwu_activate().
* Either we see its new owner->on_rq or it will see our list_add().
*/
if (owner != p) {
raw_spin_unlock(&p->blocked_lock);
raw_spin_lock(&owner->blocked_lock);
}
WARN_ON(owner == p);
raw_spin_unlock(&p->blocked_lock);
raw_spin_lock(&owner->blocked_lock);
proxy_resched_idle(rq);
proxy_enqueue_on_owner(rq, owner, p);
raw_spin_unlock(&owner->blocked_lock);
raw_spin_unlock(&mutex->wait_lock);
raw_spin_lock(&p->blocked_lock);
return NULL; /* retry task selection */
}
owner_cpu = task_cpu(owner);
if (owner_cpu != this_cpu) {
trace_sched_pe_migration(donor, owner);
/*
* @owner can disappear, simply migrate to @owner_cpu
* and leave that CPU to sort things out.
*/
if (curr_in_chain)
return proxy_resched_idle(rq);
goto migrate;
}
if (task_on_rq_migrating(owner)) {
trace_sched_pe_owner_is_migrating(owner, p);
/*
* One of the chain of mutex owners is currently migrating to this
* CPU, but has not yet been enqueued because we are holding the
* rq lock. As a simple solution, just schedule rq->idle to give
* the migration a chance to complete. Much like the migrate_task
* case we should end up back in find_proxy_task(), this time
* hopefully with all relevant tasks already enqueued.
*/
return proxy_resched_idle(rq);
}
/*
* We could race with ttwu's return migration, so holding the
* rq lock, double check owner is both on_rq & on this cpu, as
* it might not even be on our RQ still
* Its possible to race where after we check owner->on_rq
* but before we check (owner_cpu != this_cpu) that the
* task on another cpu was migrated back to this cpu. In
* that case it could slip by our checks. So double check
* we are still on this cpu and not migrating. If we get
* inconsistent results, try again.
*/
if (!(task_on_rq_queued(owner) && task_cpu(owner) == this_cpu))
goto out;
if (!task_on_rq_queued(owner) || task_cpu(owner) != this_cpu)
return NULL;
if (owner == p) {
/*
@@ -7453,81 +7528,34 @@ find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
*
* Which leaves us to finish the ttwu_runnable() and make it go.
*
* So schedule rq->idle so that ttwu_runnable can get the rq lock
* and mark owner as running.
* So schedule rq->idle so that ttwu_runnable() can get the rq
* lock and mark owner as running.
*/
if (p->blocked_on_state == BO_WAKING)
goto needs_return;
raw_spin_unlock(&p->blocked_lock);
raw_spin_unlock(&mutex->wait_lock);
return proxy_resched_idle(rq);
}
/*
* If a ww_mutex hits the die/wound case, it marks the task as
* BO_WAKING and calls try_to_wake_up(), so that the mutex
* cycle can be broken and we avoid a deadlock.
*
* However, if at that moment, we are here on the cpu which the
* die/wounded task is enqueued, we might loop on the cycle as
* BO_WAKING still causes task_is_blocked() to return true
* (since we want return migration to occur before we run the
* task).
*
* Unfortunately since we hold the rq lock, it will block
* try_to_wake_up from completing and doing the return
* migration.
*
* So when we hit a BO_WAKING task that has a valid mutex, and
* that mutex has an owner, we're hitting a mid-chain wakeup,
* so we can briefly schedule idle so we release the rq and
* let the wakeup complete.
*/
if (p->blocked_on_state == BO_WAKING)
goto needs_return;
/*
* OK, now we're absolutely sure @owner is on this
* rq, therefore holding @rq->lock is sufficient to
* guarantee its existence, as per ttwu_remote().
*/
raw_spin_unlock(&p->blocked_lock);
raw_spin_unlock(&mutex->wait_lock);
owner->blocked_donor = p;
}
WARN_ON_ONCE(owner && !owner->on_rq);
return owner;
needs_return:
#ifdef CONFIG_SMP
WARN_ON(!is_cpu_allowed(p, p->wake_cpu));
if (p->wake_cpu == this_cpu) {
/* We can actually run here fine */
p->blocked_on_state = BO_RUNNABLE;
ret = p;
goto out;
}
raw_spin_unlock(&p->blocked_lock);
raw_spin_unlock(&mutex->wait_lock);
if (curr_in_chain)
return proxy_resched_idle(rq);
p->blocked_on_state = BO_RUNNABLE;
_trace_sched_pe_return_migration(p);
proxy_migrate_task(rq, rf, p, p->wake_cpu);
/*
* NOTE: This logic is down here, because we need to call
* the functions with the mutex wait_lock and task
* blocked_lock released, so we have to get out of the
* guard() scope.
*/
migrate:
proxy_migrate_task(rq, rf, p, owner_cpu);
return NULL;
needs_return:
proxy_force_return(rq, rf, p);
return NULL;
#else
/* Nowhere else to migrate on UP */
p->blocked_on_state = BO_RUNNABLE;
ret = p;
#endif
out:
raw_spin_unlock(&p->blocked_lock);
raw_spin_unlock(&mutex->wait_lock);
return ret;
}
#else /* SCHED_PROXY_EXEC */
static struct task_struct *
@@ -7609,7 +7637,6 @@ static void __sched notrace __schedule(int sched_mode)
struct rq *rq;
bool prev_not_proxied;
int cpu;
bool preserve_need_resched = false;
cpu = smp_processor_id();
rq = cpu_rq(cpu);
@@ -7678,19 +7705,16 @@ pick_again:
next->blocked_donor = NULL;
if (unlikely(task_is_blocked(next))) {
next = find_proxy_task(rq, next, &rf);
if (!next) {
/* zap the balance_callbacks before picking again */
zap_balance_callbacks(rq);
if (!next)
goto pick_again;
}
if (next == rq->idle)
preserve_need_resched = true;
goto keep_resched;
}
trace_sched_finish_task_selection(rq->donor, next, cpu);
picked:
if (!preserve_need_resched)
clear_tsk_need_resched(prev);
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
keep_resched:
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
#endif

View File

@@ -1488,7 +1488,9 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
if (dl_entity_is_special(dl_se))
return;
scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
scaled_delta_exec = delta_exec;
if (!dl_server(dl_se))
scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec);
dl_se->runtime -= scaled_delta_exec;
@@ -1595,7 +1597,7 @@ throttle:
*/
void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
{
s64 delta_exec, scaled_delta_exec;
s64 delta_exec;
if (!rq->fair_server.dl_defer)
return;
@@ -1608,9 +1610,7 @@ void dl_server_update_idle_time(struct rq *rq, struct task_struct *p)
if (delta_exec < 0)
return;
scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec);
rq->fair_server.runtime -= scaled_delta_exec;
rq->fair_server.runtime -= delta_exec;
if (rq->fair_server.runtime < 0) {
rq->fair_server.dl_defer_running = 0;
@@ -2485,6 +2485,10 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_s
update_curr_dl(rq);
update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
if (task_is_blocked(p))
return;
if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
}
@@ -2679,34 +2683,18 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
}
static inline bool __dl_revalidate_rq_state(struct task_struct *task, struct rq *rq,
struct rq *later, bool *retry)
struct rq *later)
{
if (task_rq(task) != rq)
return false;
if (!cpumask_test_cpu(later->cpu, &task->cpus_mask))
return false;
if (task_on_cpu(rq, task))
return false;
if (!dl_task(task))
return false;
if (is_migration_disabled(task))
return false;
if (!task_on_rq_queued(task))
return false;
return true;
return __revalidate_rq_state(task, rq, later);
}
static inline bool dl_revalidate_rq_state(struct task_struct *task, struct rq *rq,
struct rq *later, bool *retry)
{
if (!sched_proxy_exec())
return __dl_revalidate_rq_state(task, rq, later, retry);
return __dl_revalidate_rq_state(task, rq, later);
if (!dl_task(task) || is_migration_disabled(task))
return false;

View File

@@ -1192,7 +1192,7 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
}
#endif /* CONFIG_SMP */
static s64 update_curr_se(struct rq *rq, struct sched_entity *se)
static s64 update_se(struct rq *rq, struct sched_entity *se)
{
u64 now = rq_clock_task(rq);
s64 delta_exec;
@@ -1203,6 +1203,7 @@ static s64 update_curr_se(struct rq *rq, struct sched_entity *se)
se->exec_start = now;
if (entity_is_task(se)) {
struct task_struct *donor = task_of(se);
struct task_struct *running = rq->curr;
/*
* If se is a task, we account the time against the running
@@ -1210,8 +1211,14 @@ static s64 update_curr_se(struct rq *rq, struct sched_entity *se)
*/
running->se.exec_start = now;
running->se.sum_exec_runtime += delta_exec;
trace_sched_stat_runtime(running, delta_exec);
account_group_exec_runtime(running, delta_exec);
/* cgroup time is always accounted against the donor */
cgroup_account_cputime(donor, delta_exec);
} else {
/* If not task, account the time against se */
/* If not task, account the time against donor se */
se->sum_exec_runtime += delta_exec;
}
@@ -1226,13 +1233,6 @@ static s64 update_curr_se(struct rq *rq, struct sched_entity *se)
return delta_exec;
}
static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
{
trace_sched_stat_runtime(p, delta_exec);
account_group_exec_runtime(p, delta_exec);
cgroup_account_cputime(p, delta_exec);
}
static inline bool did_preempt_short(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
if (!sched_feat(PREEMPT_SHORT))
@@ -1271,13 +1271,8 @@ static inline bool do_preempt_short(struct cfs_rq *cfs_rq,
s64 update_curr_common(struct rq *rq)
{
struct task_struct *donor = rq->donor;
s64 delta_exec;
delta_exec = update_curr_se(rq, &donor->se);
if (likely(delta_exec > 0))
update_curr_task(donor, delta_exec);
return delta_exec;
return update_se(rq, &donor->se);
}
/*
@@ -1285,6 +1280,12 @@ s64 update_curr_common(struct rq *rq)
*/
static void update_curr(struct cfs_rq *cfs_rq)
{
/*
* Note: cfs_rq->curr corresponds to the task picked to
* run (ie: rq->donor.se) which due to proxy-exec may
* not necessarily be the actual task running
* (rq->curr.se). This is easy to confuse!
*/
struct sched_entity *curr = cfs_rq->curr;
struct rq *rq = rq_of(cfs_rq);
s64 delta_exec;
@@ -1293,7 +1294,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
if (unlikely(!curr))
return;
delta_exec = update_curr_se(rq, curr);
delta_exec = update_se(rq, curr);
if (unlikely(delta_exec <= 0))
return;
@@ -1302,10 +1303,6 @@ static void update_curr(struct cfs_rq *cfs_rq)
update_min_vruntime(cfs_rq);
if (entity_is_task(curr)) {
struct task_struct *p = task_of(curr);
update_curr_task(p, delta_exec);
/*
* If the fair_server is active, we need to account for the
* fair_server time whether or not the task is running on

View File

@@ -1511,25 +1511,14 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
enqueue_rt_entity(rt_se, flags);
/*
* Current can't be pushed away. Selected is tied to current,
* so don't push it either.
*/
if (task_current(rq, p) || task_current_donor(rq, p))
return;
/*
* Pinned tasks can't be pushed.
*/
if (p->nr_cpus_allowed == 1)
return;
if (should_honor_rt_sync(rq, p, sync))
return;
if (task_is_blocked(p))
return;
enqueue_pushable_task(rq, p);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
}
static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -2035,42 +2024,18 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
}
static inline bool __rt_revalidate_rq_state(struct task_struct *task, struct rq *rq,
struct rq *lowest, bool *retry)
struct rq *lowest)
{
/*
* We had to unlock the run queue. In the mean time, task could have
* migrated already or had its affinity changed. Also make sure that it
* wasn't scheduled on its rq. It is possible the task was scheduled,
* set "migrate_disabled" and then got preempted, so we must check the
* task migration disable flag here too.
*/
if (task_rq(task) != rq)
return false;
if (!cpumask_test_cpu(lowest->cpu, &task->cpus_mask))
return false;
if (task_on_cpu(rq, task))
return false;
if (!rt_task(task))
return false;
if (is_migration_disabled(task))
return false;
if (!task_on_rq_queued(task))
return false;
return true;
return __revalidate_rq_state(task, rq, lowest);
}
/* XXX: TODO: Consolidate this w/ dl_revalidate_rq_state */
static inline bool rt_revalidate_rq_state(struct task_struct *task, struct rq *rq,
struct rq *lowest, bool *retry)
{
if (!sched_proxy_exec())
return __rt_revalidate_rq_state(task, rq, lowest, retry);
return __rt_revalidate_rq_state(task, rq, lowest);
/*
* Releasing the rq lock means we need to re-check pushability.
* Some scenarios:

View File

@@ -2336,7 +2336,7 @@ static inline bool task_is_blocked(struct task_struct *p)
static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
return p->on_cpu;
return READ_ONCE(p->on_cpu);
#else
return task_current(rq, p);
#endif
@@ -2344,7 +2344,7 @@ static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
static inline int task_on_rq_queued(struct task_struct *p)
{
return p->on_rq == TASK_ON_RQ_QUEUED;
return READ_ONCE(p->on_rq) == TASK_ON_RQ_QUEUED;
}
static inline int task_on_rq_migrating(struct task_struct *p)
@@ -3169,6 +3169,34 @@ extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;
static inline bool __revalidate_rq_state(struct task_struct *task, struct rq *rq,
struct rq *lowest)
{
/*
* We had to unlock the run queue. In the mean time, task could have
* migrated already or had its affinity changed. Also make sure that it
* wasn't scheduled on its rq. It is possible the task was scheduled,
* set "migrate_disabled" and then got preempted, so we must check the
* task migration disable flag here too.
*/
if (task_rq(task) != rq)
return false;
if (!cpumask_test_cpu(lowest->cpu, &task->cpus_mask))
return false;
if (task_on_cpu(rq, task))
return false;
if (is_migration_disabled(task))
return false;
if (!task_on_rq_queued(task))
return false;
return true;
}
#else /* !CONFIG_SMP: */
/*
@@ -3922,12 +3950,14 @@ int __task_is_pushable(struct rq *rq, struct task_struct *p, int cpu)
return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_SCHED_PROXY_EXEC
void move_queued_task_locked(struct rq *rq, struct rq *dst_rq, struct task_struct *task);
int task_is_pushable(struct rq *rq, struct task_struct *p, int cpu);
struct task_struct *find_exec_ctx(struct rq *rq, struct task_struct *p);
#else /* !CONFIG_SCHED_PROXY_EXEC */
#ifdef CONFIG_SMP
static inline
void move_queued_task_locked(struct rq *rq, struct rq *dst_rq, struct task_struct *task)
{
@@ -3939,14 +3969,13 @@ int task_is_pushable(struct rq *rq, struct task_struct *p, int cpu)
{
return __task_is_pushable(rq, p, cpu);
}
#endif
static inline
struct task_struct *find_exec_ctx(struct rq *rq, struct task_struct *p)
{
return p;
}
#endif /* CONFIG_SCHED_PROXY_EXEC */
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_MUTEXES

View File

@@ -82,18 +82,15 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
}
static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
struct cpu_stop_work *work,
struct wake_q_head *wakeq)
struct cpu_stop_work *work)
{
list_add_tail(&work->list, &stopper->works);
wake_q_add(wakeq, stopper->thread);
}
/* queue @work to @stopper. if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
DEFINE_WAKE_Q(wakeq);
unsigned long flags;
bool enabled;
@@ -101,12 +98,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
raw_spin_lock_irqsave(&stopper->lock, flags);
enabled = stopper->enabled;
if (enabled)
__cpu_stop_queue_work(stopper, work, &wakeq);
__cpu_stop_queue_work(stopper, work);
else if (work->done)
cpu_stop_signal_done(work->done);
raw_spin_unlock_irqrestore(&stopper->lock, flags);
wake_up_q(&wakeq);
if (enabled)
wake_up_process(stopper->thread);
preempt_enable();
return enabled;
@@ -264,7 +262,6 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
{
struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
DEFINE_WAKE_Q(wakeq);
int err;
retry:
@@ -300,8 +297,8 @@ retry:
}
err = 0;
__cpu_stop_queue_work(stopper1, work1, &wakeq);
__cpu_stop_queue_work(stopper2, work2, &wakeq);
__cpu_stop_queue_work(stopper1, work1);
__cpu_stop_queue_work(stopper2, work2);
unlock:
raw_spin_unlock(&stopper2->lock);
@@ -316,7 +313,10 @@ unlock:
goto retry;
}
wake_up_q(&wakeq);
if (!err) {
wake_up_process(stopper1->thread);
wake_up_process(stopper2->thread);
}
preempt_enable();
return err;

View File

@@ -21,6 +21,19 @@
#include <linux/binfmts.h>
#include <linux/proc_ns.h>
#include <trace/hooks/user.h>
#include <linux/android_kabi.h>
ANDROID_KABI_DECLONLY(address_space);
ANDROID_KABI_DECLONLY(cred);
ANDROID_KABI_DECLONLY(dentry);
ANDROID_KABI_DECLONLY(file);
ANDROID_KABI_DECLONLY(io_context);
ANDROID_KABI_DECLONLY(module);
ANDROID_KABI_DECLONLY(pid);
ANDROID_KABI_DECLONLY(sighand_struct);
ANDROID_KABI_DECLONLY(signal_struct);
#if IS_ENABLED(CONFIG_BINFMT_MISC)
struct binfmt_misc init_binfmt_misc = {
.entries = LIST_HEAD_INIT(init_binfmt_misc.entries),
@@ -165,6 +178,7 @@ static void user_epoll_free(struct user_struct *up)
static void free_user(struct user_struct *up, unsigned long flags)
__releases(&uidhash_lock)
{
trace_android_vh_free_user(up);
uid_hash_remove(up);
spin_unlock_irqrestore(&uidhash_lock, flags);
user_epoll_free(up);
@@ -187,6 +201,7 @@ struct user_struct *find_user(kuid_t uid)
spin_unlock_irqrestore(&uidhash_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(find_user);
void free_uid(struct user_struct *up)
{
@@ -216,6 +231,7 @@ struct user_struct *alloc_uid(kuid_t uid)
new->uid = uid;
refcount_set(&new->__count, 1);
trace_android_vh_alloc_uid(new);
if (user_epoll_alloc(new)) {
kmem_cache_free(uid_cachep, new);
return NULL;

View File

@@ -719,6 +719,7 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
return page_folio(page);
return NULL;
}
EXPORT_SYMBOL_GPL(vm_normal_folio_pmd);
#endif
static void restore_exclusive_pte(struct vm_area_struct *vma,

View File

@@ -472,6 +472,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
int err = 0;
gfp_t gfp = readahead_gfp_mask(mapping);
unsigned int min_ra_size = max(4, mapping_min_folio_nrpages(mapping));
bool bypass = false;
/*
* Fallback when size < min_nrpages as each folio should be
@@ -480,6 +481,11 @@ void page_cache_ra_order(struct readahead_control *ractl,
if (!mapping_large_folio_support(mapping) || ra->size < min_ra_size)
goto fallback;
trace_android_vh_page_cache_ra_order_bypass(ractl, ra, new_order, &gfp,
&bypass);
if (bypass)
goto fallback;
limit = min(limit, index + ra->size - 1);
if (new_order < mapping_max_folio_order(mapping))

View File

@@ -4661,6 +4661,8 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
do_slab_free(s, slab, object, object, 1, addr);
trace_android_vh_slab_free(addr, s);
}
#ifdef CONFIG_MEMCG

View File

@@ -742,6 +742,7 @@ void folio_deactivate(struct folio *folio)
folio_batch_add_and_move(folio, lru_deactivate, true);
}
EXPORT_SYMBOL_GPL(folio_deactivate);
/**
* folio_mark_lazyfree - make an anon folio lazyfree

View File

@@ -21,6 +21,7 @@
#include "mman.c"
#include "mutex.c"
#include "page.c"
#include "pid_namespace.c"
#include "poll.c"
#include "rbtree.c"
#include "refcount.c"

View File

@@ -0,0 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pid_namespace.h>
#include <linux/cleanup.h>
struct pid_namespace *rust_helper_get_pid_ns(struct pid_namespace *ns)
{
return get_pid_ns(ns);
}
void rust_helper_put_pid_ns(struct pid_namespace *ns)
{
put_pid_ns(ns);
}
/* Get a reference on a task's pid namespace. */
struct pid_namespace *rust_helper_task_get_pid_ns(struct task_struct *task)
{
struct pid_namespace *pid_ns;
guard(rcu)();
pid_ns = task_active_pid_ns(task);
if (pid_ns)
get_pid_ns(pid_ns);
return pid_ns;
}

View File

@@ -52,6 +52,7 @@ pub mod mm;
pub mod net;
pub mod page;
pub mod page_size_compat;
pub mod pid_namespace;
pub mod prelude;
pub mod print;
pub mod rbtree;

View File

@@ -14,7 +14,7 @@ use crate::{
error::{to_result, Error, Result, VTABLE_DEFAULT_ERROR},
ffi::{c_int, c_long, c_uint, c_ulong, c_void},
fs::{File, LocalFile},
mm::virt::VmAreaNew,
mm::virt::VmaNew,
prelude::*,
seq_file::SeqFile,
str::CStr,
@@ -40,7 +40,7 @@ impl MiscDeviceOptions {
let mut result: bindings::miscdevice = unsafe { MaybeUninit::zeroed().assume_init() };
result.minor = bindings::MISC_DYNAMIC_MINOR as _;
result.name = self.name.as_char_ptr();
result.fops = create_vtable::<T>();
result.fops = MiscdeviceVTable::<T>::build();
result
}
}
@@ -135,9 +135,9 @@ pub trait MiscDevice: Sized {
fn mmap(
_device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
_file: &File,
_vma: &VmAreaNew,
_vma: &VmaNew,
) -> Result {
kernel::build_error!(VTABLE_DEFAULT_ERROR)
kernel::build_error(VTABLE_DEFAULT_ERROR)
}
/// Seeks this miscdevice.
@@ -270,263 +270,265 @@ impl IovIter {
}
}
const fn create_vtable<T: MiscDevice>() -> &'static bindings::file_operations {
const fn maybe_fn<T: Copy>(check: bool, func: T) -> Option<T> {
if check {
Some(func)
} else {
None
/// A vtable for the file operations of a Rust miscdevice.
struct MiscdeviceVTable<T: MiscDevice>(PhantomData<T>);
impl<T: MiscDevice> MiscdeviceVTable<T> {
/// # Safety
///
/// `file` and `inode` must be the file and inode for a file that is undergoing initialization.
/// The file must be associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn open(inode: *mut bindings::inode, raw_file: *mut bindings::file) -> c_int {
// SAFETY: The pointers are valid and for a file being opened.
let ret = unsafe { bindings::generic_file_open(inode, raw_file) };
if ret != 0 {
return ret;
}
// SAFETY: The open call of a file can access the private data.
let misc_ptr = unsafe { (*raw_file).private_data };
// SAFETY: This is a miscdevice, so `misc_open()` set the private data to a pointer to the
// associated `struct miscdevice` before calling into this method. Furthermore,
// `misc_open()` ensures that the miscdevice can't be unregistered and freed during this
// call to `fops_open`.
let misc = unsafe { &*misc_ptr.cast::<MiscDeviceRegistration<T>>() };
// SAFETY:
// * This underlying file is valid for (much longer than) the duration of `T::open`.
// * There is no active fdget_pos region on the file on this thread.
let file = unsafe { File::from_raw_file(raw_file) };
let ptr = match T::open(file, misc) {
Ok(ptr) => ptr,
Err(err) => return err.to_errno(),
};
// This overwrites the private data with the value specified by the user, changing the type
// of this file's private data. All future accesses to the private data is performed by
// other fops_* methods in this file, which all correctly cast the private data to the new
// type.
//
// SAFETY: The open call of a file can access the private data.
unsafe { (*raw_file).private_data = ptr.into_foreign().cast_mut() };
0
}
/// # Safety
///
/// `file` and `inode` must be the file and inode for a file that is being released. The file
/// must be associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn release(_inode: *mut bindings::inode, file: *mut bindings::file) -> c_int {
// SAFETY: The release call of a file owns the private data.
let private = unsafe { (*file).private_data };
// SAFETY: The release call of a file owns the private data.
let ptr = unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) };
// SAFETY:
// * The file is valid for the duration of this call.
// * There is no active fdget_pos region on the file on this thread.
T::release(ptr, unsafe { File::from_raw_file(file) });
0
}
/// # Safety
///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
/// `vma` must be a vma that is currently being mmap'ed with this file.
unsafe extern "C" fn mmap(
file: *mut bindings::file,
vma: *mut bindings::vm_area_struct,
) -> c_int {
// SAFETY: The mmap call of a file can access the private data.
let private = unsafe { (*file).private_data };
// SAFETY: This is a Rust Miscdevice, so we call `into_foreign` in `open` and
// `from_foreign` in `release`, and `fops_mmap` is guaranteed to be called between those
// two operations.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
// SAFETY: The caller provides a vma that is undergoing initial VMA setup.
let area = unsafe { VmaNew::from_raw(vma) };
// SAFETY:
// * The file is valid for the duration of this call.
// * There is no active fdget_pos region on the file on this thread.
let file = unsafe { File::from_raw_file(file) };
match T::mmap(device, file, area) {
Ok(()) => 0,
Err(err) => err.to_errno(),
}
}
struct VtableHelper<T: MiscDevice> {
_t: PhantomData<T>,
/// # Safety
///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn llseek(
file: *mut bindings::file,
offset: loff_t,
whence: c_int,
) -> loff_t {
// SAFETY: The release call of a file owns the private data.
let private = unsafe { (*file).private_data };
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
// SAFETY:
// * The file is valid for the duration of this call.
// * We are inside an fdget_pos region, so there cannot be any active fdget_pos regions on
// other threads.
let file = unsafe { LocalFile::from_raw_file(file) };
match T::llseek(device, file, offset, whence) {
Ok(res) => res as loff_t,
Err(err) => err.to_errno() as loff_t,
}
}
impl<T: MiscDevice> VtableHelper<T> {
const VTABLE: bindings::file_operations = bindings::file_operations {
open: Some(fops_open::<T>),
release: Some(fops_release::<T>),
mmap: maybe_fn(T::HAS_MMAP, fops_mmap::<T>),
llseek: maybe_fn(T::HAS_LLSEEK, fops_llseek::<T>),
read_iter: maybe_fn(T::HAS_READ_ITER, fops_read_iter::<T>),
write_iter: maybe_fn(T::HAS_WRITE_ITER, fops_write_iter::<T>),
unlocked_ioctl: maybe_fn(T::HAS_IOCTL, fops_ioctl::<T>),
#[cfg(CONFIG_COMPAT)]
compat_ioctl: if T::HAS_COMPAT_IOCTL {
Some(fops_compat_ioctl::<T>)
} else if T::HAS_IOCTL {
Some(bindings::compat_ptr_ioctl)
} else {
None
},
show_fdinfo: maybe_fn(T::HAS_SHOW_FDINFO, fops_show_fdinfo::<T>),
// SAFETY: All zeros is a valid value for `bindings::file_operations`.
..unsafe { MaybeUninit::zeroed().assume_init() }
/// # Safety
///
/// Arguments must be valid.
unsafe extern "C" fn read_iter(
kiocb: *mut bindings::kiocb,
iter: *mut bindings::iov_iter,
) -> isize {
let kiocb = Kiocb {
inner: unsafe { NonNull::new_unchecked(kiocb) },
_phantom: PhantomData,
};
let iov = unsafe { &mut *iter.cast::<IovIter>() };
match T::read_iter(kiocb, iov) {
Ok(res) => res as isize,
Err(err) => err.to_errno() as isize,
}
}
&VtableHelper::<T>::VTABLE
}
/// # Safety
///
/// Arguments must be valid.
unsafe extern "C" fn write_iter(
kiocb: *mut bindings::kiocb,
iter: *mut bindings::iov_iter,
) -> isize {
let kiocb = Kiocb {
inner: unsafe { NonNull::new_unchecked(kiocb) },
_phantom: PhantomData,
};
let iov = unsafe { &mut *iter.cast::<IovIter>() };
/// # Safety
///
/// `file` and `inode` must be the file and inode for a file that is undergoing initialization.
/// The file must be associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn fops_open<T: MiscDevice>(
inode: *mut bindings::inode,
raw_file: *mut bindings::file,
) -> c_int {
// SAFETY: The pointers are valid and for a file being opened.
let ret = unsafe { bindings::generic_file_open(inode, raw_file) };
if ret != 0 {
return ret;
match T::write_iter(kiocb, iov) {
Ok(res) => res as isize,
Err(err) => err.to_errno() as isize,
}
}
// SAFETY: The open call of a file can access the private data.
let misc_ptr = unsafe { (*raw_file).private_data };
/// # Safety
///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn ioctl(file: *mut bindings::file, cmd: c_uint, arg: c_ulong) -> c_long {
// SAFETY: The ioctl call of a file can access the private data.
let private = unsafe { (*file).private_data };
// SAFETY: Ioctl calls can borrow the private data of the file.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
// SAFETY: This is a miscdevice, so `misc_open()` set the private data to a pointer to the
// associated `struct miscdevice` before calling into this method. Furthermore, `misc_open()`
// ensures that the miscdevice can't be unregistered and freed during this call to `fops_open`.
let misc = unsafe { &*misc_ptr.cast::<MiscDeviceRegistration<T>>() };
// SAFETY:
// * The file is valid for the duration of this call.
// * There is no active fdget_pos region on the file on this thread.
let file = unsafe { File::from_raw_file(file) };
// SAFETY:
// * This underlying file is valid for (much longer than) the duration of `T::open`.
// * There is no active fdget_pos region on the file on this thread.
let file = unsafe { File::from_raw_file(raw_file) };
match T::ioctl(device, file, cmd, arg) {
Ok(ret) => ret as c_long,
Err(err) => err.to_errno() as c_long,
}
}
let ptr = match T::open(file, misc) {
Ok(ptr) => ptr,
Err(err) => return err.to_errno(),
/// # Safety
///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
#[cfg(CONFIG_COMPAT)]
unsafe extern "C" fn compat_ioctl(
    file: *mut bindings::file,
    cmd: c_uint,
    arg: c_ulong,
) -> c_long {
    // SAFETY: The compat ioctl call of a file can access the private data.
    let raw_data = unsafe { (*file).private_data };
    // SAFETY: Ioctl calls can borrow the private data of the file.
    let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(raw_data) };
    // SAFETY:
    // * The file is valid for the duration of this call.
    // * There is no active fdget_pos region on the file on this thread.
    let file_ref = unsafe { File::from_raw_file(file) };
    // Fold the handler's `Result` into the C return convention: the success value as-is,
    // or a negative errno on failure.
    T::compat_ioctl(device, file_ref, cmd, arg)
        .map(|ret| ret as c_long)
        .unwrap_or_else(|err| err.to_errno() as c_long)
}
/// # Safety
///
/// - `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
/// - `seq_file` must be a valid `struct seq_file` that we can write to.
unsafe extern "C" fn show_fdinfo(seq_file: *mut bindings::seq_file, file: *mut bindings::file) {
    // SAFETY: The show_fdinfo call of a file can access the private data.
    let private = unsafe { (*file).private_data };
    // SAFETY: Calls to this fops hook may borrow the private data of the file.
    let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
    // SAFETY:
    // * The file is valid for the duration of this call.
    // * There is no active fdget_pos region on the file on this thread.
    let file = unsafe { File::from_raw_file(file) };
    // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in
    // which this method is called.
    let m = unsafe { SeqFile::from_raw(seq_file) };
    // Delegate to the driver to print its fdinfo lines into the seq_file.
    T::show_fdinfo(device, m, file);
}
/// The `struct file_operations` vtable built for the driver type `T`.
///
/// `open` and `release` are always populated; every other hook is populated only when the
/// corresponding `T::HAS_*` flag is set, and stays `None` otherwise so the kernel applies its
/// default behavior for that operation.
const VTABLE: bindings::file_operations = bindings::file_operations {
    open: Some(Self::open),
    release: Some(Self::release),
    mmap: if T::HAS_MMAP { Some(Self::mmap) } else { None },
    llseek: if T::HAS_LLSEEK {
        Some(Self::llseek)
    } else {
        None
    },
    read_iter: if T::HAS_READ_ITER {
        Some(Self::read_iter)
    } else {
        None
    },
    write_iter: if T::HAS_WRITE_ITER {
        Some(Self::write_iter)
    } else {
        None
    },
    unlocked_ioctl: if T::HAS_IOCTL {
        Some(Self::ioctl)
    } else {
        None
    },
    #[cfg(CONFIG_COMPAT)]
    compat_ioctl: if T::HAS_COMPAT_IOCTL {
        Some(Self::compat_ioctl)
    } else if T::HAS_IOCTL {
        // No dedicated compat handler: fall back to the kernel's `compat_ptr_ioctl` helper,
        // which forwards to the regular ioctl handler.
        Some(bindings::compat_ptr_ioctl)
    } else {
        None
    },
    show_fdinfo: if T::HAS_SHOW_FDINFO {
        Some(Self::show_fdinfo)
    } else {
        None
    },
    // SAFETY: All zeros is a valid value for `bindings::file_operations`.
    ..unsafe { MaybeUninit::zeroed().assume_init() }
};
// This overwrites the private data with the value specified by the user, changing the type of
// this file's private data. All future accesses to the private data is performed by other
// fops_* methods in this file, which all correctly cast the private data to the new type.
//
// SAFETY: The open call of a file can access the private data.
unsafe { (*raw_file).private_data = ptr.into_foreign().cast_mut() };
0
}
/// # Safety
///
/// `file` and `inode` must be the file and inode for a file that is being released. The file must
/// be associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn fops_release<T: MiscDevice>(
    _inode: *mut bindings::inode,
    file: *mut bindings::file,
) -> c_int {
    // SAFETY: The release call of a file owns the private data.
    let private = unsafe { (*file).private_data };
    // SAFETY: The release call of a file owns the private data. `from_foreign` reverses the
    // `into_foreign` performed when the file was opened, so `ptr` takes back ownership of the
    // `T::Ptr` stored in the private data.
    let ptr = unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) };
    // SAFETY:
    // * The file is valid for the duration of this call.
    // * There is no active fdget_pos region on the file on this thread.
    T::release(ptr, unsafe { File::from_raw_file(file) });
    // Release always reports success to the kernel.
    0
}
/// # Safety
///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
/// `vma` must be a vma that is currently being mmap'ed with this file.
unsafe extern "C" fn fops_mmap<T: MiscDevice>(
file: *mut bindings::file,
vma: *mut bindings::vm_area_struct,
) -> c_int {
// SAFETY: The mmap call of a file can access the private data.
let private = unsafe { (*file).private_data };
// SAFETY: This is a Rust Miscdevice, so we call `into_foreign` in `open` and `from_foreign` in
// `release`, and `fops_mmap` is guaranteed to be called between those two operations.
let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
// SAFETY: The caller provides a vma that is undergoing initial VMA setup.
let area = unsafe { VmAreaNew::from_raw(vma) };
// SAFETY:
// * The file is valid for the duration of this call.
// * There is no active fdget_pos region on the file on this thread.
let file = unsafe { File::from_raw_file(file) };
match T::mmap(device, file, area) {
Ok(()) => 0,
Err(err) => err.to_errno() as c_int,
const fn build() -> &'static bindings::file_operations {
&Self::VTABLE
}
}
/// # Safety
///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn fops_llseek<T: MiscDevice>(
    file: *mut bindings::file,
    offset: loff_t,
    whence: c_int,
) -> loff_t {
    // SAFETY: The llseek call of a file can access the private data.
    let raw_data = unsafe { (*file).private_data };
    // SAFETY: Llseek calls can borrow the private data of the file.
    let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(raw_data) };
    // SAFETY:
    // * The file is valid for the duration of this call.
    // * We are inside an fdget_pos region, so there cannot be any active fdget_pos regions on
    //   other threads.
    let local_file = unsafe { LocalFile::from_raw_file(file) };
    // On success return the handler's resulting offset; on failure a negative errno.
    T::llseek(device, local_file, offset, whence)
        .map(|pos| pos as loff_t)
        .unwrap_or_else(|err| err.to_errno() as loff_t)
}
/// # Safety
///
/// Arguments must be valid.
unsafe extern "C" fn fops_read_iter<T: MiscDevice>(
    kiocb: *mut bindings::kiocb,
    iter: *mut bindings::iov_iter,
) -> isize {
    // SAFETY (`new_unchecked`): the caller guarantees `kiocb` is a valid pointer, which in
    // particular means it is non-null.
    let io = Kiocb {
        inner: unsafe { NonNull::new_unchecked(kiocb) },
        _phantom: PhantomData,
    };
    // SAFETY: The caller guarantees that `iter` points at a valid `iov_iter` for the duration
    // of this call; the cast reinterprets it through the `IovIter` wrapper type.
    let iov = unsafe { &mut *iter.cast::<IovIter>() };
    // Fold the handler's `Result` into the C return convention: the success value as-is, or a
    // negative errno on failure.
    T::read_iter(io, iov)
        .map(|n| n as isize)
        .unwrap_or_else(|e| e.to_errno() as isize)
}
/// # Safety
///
/// Arguments must be valid.
unsafe extern "C" fn fops_write_iter<T: MiscDevice>(
    kiocb: *mut bindings::kiocb,
    iter: *mut bindings::iov_iter,
) -> isize {
    // SAFETY (`new_unchecked`): the caller guarantees `kiocb` is a valid pointer, which in
    // particular means it is non-null.
    let io = Kiocb {
        inner: unsafe { NonNull::new_unchecked(kiocb) },
        _phantom: PhantomData,
    };
    // SAFETY: The caller guarantees that `iter` points at a valid `iov_iter` for the duration
    // of this call; the cast reinterprets it through the `IovIter` wrapper type.
    let iov = unsafe { &mut *iter.cast::<IovIter>() };
    // Fold the handler's `Result` into the C return convention: the success value as-is, or a
    // negative errno on failure.
    T::write_iter(io, iov)
        .map(|n| n as isize)
        .unwrap_or_else(|e| e.to_errno() as isize)
}
/// # Safety
///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn fops_ioctl<T: MiscDevice>(
    file: *mut bindings::file,
    cmd: c_uint,
    arg: c_ulong,
) -> c_long {
    // SAFETY: The ioctl call of a file can access the private data.
    let raw_data = unsafe { (*file).private_data };
    // SAFETY: Ioctl calls can borrow the private data of the file.
    let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(raw_data) };
    // SAFETY:
    // * The file is valid for the duration of this call.
    // * There is no active fdget_pos region on the file on this thread.
    let file_ref = unsafe { File::from_raw_file(file) };
    // Widen `arg` to `usize` for the Rust handler, then fold the `Result` into the C return
    // convention: the success value as-is, or a negative errno on failure.
    T::ioctl(device, file_ref, cmd, arg as usize)
        .map(|ret| ret as c_long)
        .unwrap_or_else(|err| err.to_errno() as c_long)
}
/// # Safety
///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
#[cfg(CONFIG_COMPAT)]
unsafe extern "C" fn fops_compat_ioctl<T: MiscDevice>(
    file: *mut bindings::file,
    cmd: c_uint,
    arg: c_ulong,
) -> c_long {
    // SAFETY: The compat ioctl call of a file can access the private data.
    let private = unsafe { (*file).private_data };
    // SAFETY: Ioctl calls can borrow the private data of the file.
    let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
    // SAFETY:
    // * The file is valid for the duration of this call.
    // * There is no active fdget_pos region on the file on this thread.
    let file = unsafe { File::from_raw_file(file) };
    // Widen `arg` to `usize` for the Rust handler; success values are returned as-is and
    // errors are mapped to a negative errno, matching the C calling convention.
    match T::compat_ioctl(device, file, cmd, arg as usize) {
        Ok(ret) => ret as c_long,
        Err(err) => err.to_errno() as c_long,
    }
}
/// # Safety
///
/// - `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
/// - `seq_file` must be a valid `struct seq_file` that we can write to.
unsafe extern "C" fn fops_show_fdinfo<T: MiscDevice>(
    seq_file: *mut bindings::seq_file,
    file: *mut bindings::file,
) {
    // SAFETY: The show_fdinfo call of a file can access the private data.
    let private = unsafe { (*file).private_data };
    // SAFETY: Calls to this fops hook may borrow the private data of the file.
    let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
    // SAFETY:
    // * The file is valid for the duration of this call.
    // * There is no active fdget_pos region on the file on this thread.
    let file = unsafe { File::from_raw_file(file) };
    // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
    // this method is called.
    let m = unsafe { SeqFile::from_raw(seq_file) };
    // Delegate to the driver to print its fdinfo lines into the seq_file.
    T::show_fdinfo(device, m, file);
}

View File

@@ -10,6 +10,7 @@
//! control what happens when userspace reads or writes to that region of memory.
//!
//! C header: [`include/linux/mm.h`](srctree/include/linux/mm.h)
#![cfg(CONFIG_MMU)]
use crate::{
bindings,
@@ -18,7 +19,7 @@ use crate::{
use core::{ops::Deref, ptr::NonNull};
pub mod virt;
use virt::VmAreaRef;
use virt::VmaRef;
/// A wrapper for the kernel's `struct mm_struct`.
///
@@ -131,11 +132,13 @@ unsafe impl Sync for MmWithUserAsync {}
// SAFETY: By the type invariants, this type is always refcounted.
unsafe impl AlwaysRefCounted for MmWithUserAsync {
#[inline]
fn inc_ref(&self) {
// SAFETY: The pointer is valid since self is a reference.
unsafe { bindings::mmget(self.as_raw()) };
}
#[inline]
unsafe fn dec_ref(obj: NonNull<Self>) {
// SAFETY: The caller is giving up their refcount.
unsafe { bindings::mmput_async(obj.cast().as_ptr()) };
@@ -222,12 +225,12 @@ impl MmWithUser {
{
// SAFETY: Calling `bindings::lock_vma_under_rcu` is always okay given an mm where
// `mm_users` is non-zero.
let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr as _) };
let vma = unsafe { bindings::lock_vma_under_rcu(self.as_raw(), vma_addr) };
if !vma.is_null() {
return Some(VmaReadGuard {
// SAFETY: If `lock_vma_under_rcu` returns a non-null ptr, then it points at a
// valid vma. The vma is stable for as long as the vma read lock is held.
vma: unsafe { VmAreaRef::from_raw(vma) },
vma: unsafe { VmaRef::from_raw(vma) },
_nts: NotThreadSafe,
});
}
@@ -285,18 +288,20 @@ pub struct MmapReadGuard<'a> {
impl<'a> MmapReadGuard<'a> {
/// Look up a vma at the given address.
#[inline]
pub fn vma_lookup(&self, vma_addr: usize) -> Option<&virt::VmAreaRef> {
// SAFETY: We hold a reference to the mm, so the pointer must be valid. Any value is okay
// for `vma_addr`.
let vma = unsafe { bindings::vma_lookup(self.mm.as_raw(), vma_addr as _) };
pub fn vma_lookup(&self, vma_addr: usize) -> Option<&virt::VmaRef> {
// SAFETY: By the type invariants we hold the mmap read guard, so we can safely call this
// method. Any value is okay for `vma_addr`.
let vma = unsafe { bindings::vma_lookup(self.mm.as_raw(), vma_addr) };
if vma.is_null() {
None
} else {
// SAFETY: We just checked that a vma was found, so the pointer is valid. Furthermore,
// the returned area will borrow from this read lock guard, so it can only be used
// while the mmap read lock is still held.
unsafe { Some(virt::VmAreaRef::from_raw(vma)) }
// SAFETY: We just checked that a vma was found, so the pointer references a valid vma.
//
// Furthermore, the returned vma is still under the protection of the read lock guard
// and can be used while the mmap read lock is still held. That the vma is not used
// after the MmapReadGuard gets dropped is enforced by the borrow-checker.
unsafe { Some(virt::VmaRef::from_raw(vma)) }
}
}
}
@@ -315,17 +320,17 @@ impl Drop for MmapReadGuard<'_> {
///
/// This `VmaReadGuard` guard owns the vma read lock.
pub struct VmaReadGuard<'a> {
vma: &'a VmAreaRef,
vma: &'a VmaRef,
// `vma_end_read` must be called on the same thread as where the lock was taken
_nts: NotThreadSafe,
}
// Make all `VmAreaRef` methods available on `VmaReadGuard`.
// Make all `VmaRef` methods available on `VmaReadGuard`.
impl Deref for VmaReadGuard<'_> {
type Target = VmAreaRef;
type Target = VmaRef;
#[inline]
fn deref(&self) -> &VmAreaRef {
fn deref(&self) -> &VmaRef {
self.vma
}
}

View File

@@ -10,7 +10,7 @@
//!
//! The module has several different Rust types that all correspond to the C type called
//! `vm_area_struct`. The different structs represent what kind of access you have to the VMA, e.g.
//! [`VmAreaRef`] is used when you hold the mmap or vma read lock. Using the appropriate struct
//! [`VmaRef`] is used when you hold the mmap or vma read lock. Using the appropriate struct
//! ensures that you can't, for example, accidentally call a function that requires holding the
//! write lock when you only hold the read lock.
@@ -32,13 +32,13 @@ use core::ops::Deref;
///
/// The caller must hold the mmap read lock or the vma read lock.
#[repr(transparent)]
pub struct VmAreaRef {
pub struct VmaRef {
vma: Opaque<bindings::vm_area_struct>,
}
// Methods you can call when holding the mmap or vma read lock (or stronger). They must be usable
// no matter what the vma flags are.
impl VmAreaRef {
impl VmaRef {
/// Access a virtual memory area given a raw pointer.
///
/// # Safety
@@ -73,7 +73,7 @@ impl VmAreaRef {
pub fn flags(&self) -> vm_flags_t {
// SAFETY: By the type invariants, the caller holds at least the mmap read lock, so this
// access is not a data race.
unsafe { (*self.as_ptr()).__bindgen_anon_2.vm_flags as _ }
unsafe { (*self.as_ptr()).__bindgen_anon_2.vm_flags }
}
/// Returns the (inclusive) start address of the virtual memory area.
@@ -81,7 +81,7 @@ impl VmAreaRef {
pub fn start(&self) -> usize {
// SAFETY: By the type invariants, the caller holds at least the mmap read lock, so this
// access is not a data race.
unsafe { (*self.as_ptr()).__bindgen_anon_1.__bindgen_anon_1.vm_start as _ }
unsafe { (*self.as_ptr()).__bindgen_anon_1.__bindgen_anon_1.vm_start }
}
/// Returns the (exclusive) end address of the virtual memory area.
@@ -89,7 +89,7 @@ impl VmAreaRef {
pub fn end(&self) -> usize {
// SAFETY: By the type invariants, the caller holds at least the mmap read lock, so this
// access is not a data race.
unsafe { (*self.as_ptr()).__bindgen_anon_1.__bindgen_anon_1.vm_end as _ }
unsafe { (*self.as_ptr()).__bindgen_anon_1.__bindgen_anon_1.vm_end }
}
/// Zap pages in the given page range.
@@ -124,27 +124,22 @@ impl VmAreaRef {
// sufficient for this method call. This method has no requirements on the vma flags. The
// address range is checked to be within the vma.
unsafe {
bindings::zap_page_range_single(
self.as_ptr(),
address as _,
size as _,
core::ptr::null_mut(),
)
bindings::zap_page_range_single(self.as_ptr(), address, size, core::ptr::null_mut())
};
}
/// If the [`VM_MIXEDMAP`] flag is set, returns a [`VmAreaMixedMap`] to this VMA, otherwise
/// If the [`VM_MIXEDMAP`] flag is set, returns a [`VmaMixedMap`] to this VMA, otherwise
/// returns `None`.
///
/// This can be used to access methods that require [`VM_MIXEDMAP`] to be set.
///
/// [`VM_MIXEDMAP`]: flags::MIXEDMAP
#[inline]
pub fn as_mixedmap_vma(&self) -> Option<&VmAreaMixedMap> {
pub fn as_mixedmap_vma(&self) -> Option<&VmaMixedMap> {
if self.flags() & flags::MIXEDMAP != 0 {
// SAFETY: We just checked that `VM_MIXEDMAP` is set. All other requirements are
// satisfied by the type invariants of `VmAreaRef`.
Some(unsafe { VmAreaMixedMap::from_raw(self.as_ptr()) })
// satisfied by the type invariants of `VmaRef`.
Some(unsafe { VmaMixedMap::from_raw(self.as_ptr()) })
} else {
None
}
@@ -155,7 +150,7 @@ impl VmAreaRef {
///
/// It represents an area of virtual memory.
///
/// This struct is identical to [`VmAreaRef`] except that it must only be used when the
/// This struct is identical to [`VmaRef`] except that it must only be used when the
/// [`VM_MIXEDMAP`] flag is set on the vma.
///
/// # Invariants
@@ -165,21 +160,21 @@ impl VmAreaRef {
///
/// [`VM_MIXEDMAP`]: flags::MIXEDMAP
#[repr(transparent)]
pub struct VmAreaMixedMap {
vma: VmAreaRef,
pub struct VmaMixedMap {
vma: VmaRef,
}
// Make all `VmAreaRef` methods available on `VmAreaMixedMap`.
impl Deref for VmAreaMixedMap {
type Target = VmAreaRef;
// Make all `VmaRef` methods available on `VmaMixedMap`.
impl Deref for VmaMixedMap {
type Target = VmaRef;
#[inline]
fn deref(&self) -> &VmAreaRef {
fn deref(&self) -> &VmaRef {
&self.vma
}
}
impl VmAreaMixedMap {
impl VmaMixedMap {
/// Access a virtual memory area given a raw pointer.
///
/// # Safety
@@ -199,36 +194,36 @@ impl VmAreaMixedMap {
pub fn vm_insert_page(&self, address: usize, page: &Page) -> Result {
// SAFETY: By the type invariant of `Self` caller has read access and has verified that
// `VM_MIXEDMAP` is set. By invariant on `Page` the page has order 0.
to_result(unsafe { bindings::vm_insert_page(self.as_ptr(), address as _, page.as_ptr()) })
to_result(unsafe { bindings::vm_insert_page(self.as_ptr(), address, page.as_ptr()) })
}
}
/// A configuration object for setting up a VMA in an `f_ops->mmap()` hook.
///
/// The `f_ops->mmap()` hook is called when a new VMA is being created, and the hook is able to
/// configure the VMA in various ways to fit the driver that owns it. Using `VmAreaNew` indicates
/// that you are allowed to perform operations on the VMA that can only be performed before the VMA
/// is fully initialized.
/// configure the VMA in various ways to fit the driver that owns it. Using `VmaNew` indicates that
/// you are allowed to perform operations on the VMA that can only be performed before the VMA is
/// fully initialized.
///
/// # Invariants
///
/// For the duration of 'a, the referenced vma must be undergoing initialization in an
/// `f_ops->mmap()` hook.
pub struct VmAreaNew {
vma: VmAreaRef,
pub struct VmaNew {
vma: VmaRef,
}
// Make all `VmAreaRef` methods available on `VmAreaNew`.
impl Deref for VmAreaNew {
type Target = VmAreaRef;
// Make all `VmaRef` methods available on `VmaNew`.
impl Deref for VmaNew {
type Target = VmaRef;
#[inline]
fn deref(&self) -> &VmAreaRef {
fn deref(&self) -> &VmaRef {
&self.vma
}
}
impl VmAreaNew {
impl VmaNew {
/// Access a virtual memory area given a raw pointer.
///
/// # Safety
@@ -252,7 +247,7 @@ impl VmAreaNew {
flags &= !unset;
// SAFETY: This is not a data race: the vma is undergoing initial setup, so it's not yet
// shared. Additionally, `VmAreaNew` is `!Sync`, so it cannot be used to write in parallel.
// shared. Additionally, `VmaNew` is `!Sync`, so it cannot be used to write in parallel.
// The caller promises that this does not set the flags to an invalid value.
unsafe { (*self.as_ptr()).__bindgen_anon_2.__vm_flags = flags };
}
@@ -262,13 +257,13 @@ impl VmAreaNew {
/// This enables the vma to contain both `struct page` and pure PFN pages. Returns a reference
/// that can be used to call `vm_insert_page` on the vma.
#[inline]
pub fn set_mixedmap(&self) -> &VmAreaMixedMap {
pub fn set_mixedmap(&self) -> &VmaMixedMap {
// SAFETY: We don't yet provide a way to set VM_PFNMAP, so this cannot put the flags in an
// invalid state.
unsafe { self.update_flags(flags::MIXEDMAP, 0) };
// SAFETY: We just set `VM_MIXEDMAP` on the vma.
unsafe { VmAreaMixedMap::from_raw(self.vma.as_ptr()) }
unsafe { VmaMixedMap::from_raw(self.vma.as_ptr()) }
}
/// Set the `VM_IO` flag on this vma.
@@ -391,7 +386,7 @@ impl VmAreaNew {
#[doc(inline)]
pub use bindings::vm_flags_t;
/// All possible flags for [`VmAreaRef`].
/// All possible flags for [`VmaRef`].
pub mod flags {
use super::vm_flags_t;
use crate::bindings;

View File

@@ -0,0 +1,68 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2024 Christian Brauner <brauner@kernel.org>
//! Pid namespaces.
//!
//! C header: [`include/linux/pid_namespace.h`](srctree/include/linux/pid_namespace.h) and
//! [`include/linux/pid.h`](srctree/include/linux/pid.h)
use crate::{
bindings,
types::{AlwaysRefCounted, Opaque},
};
use core::ptr;
/// Wraps the kernel's `struct pid_namespace`. Thread safe.
///
/// This structure represents the Rust abstraction for a C `struct pid_namespace`. This
/// implementation abstracts the usage of an already existing C `struct pid_namespace` within Rust
/// code that we get passed from the C side.
#[repr(transparent)]
pub struct PidNamespace {
inner: Opaque<bindings::pid_namespace>,
}
impl PidNamespace {
    /// Returns a raw pointer to the inner C struct.
    ///
    /// The pointer is valid for as long as `self` is; no reference count is taken.
    #[inline]
    pub fn as_ptr(&self) -> *mut bindings::pid_namespace {
        self.inner.get()
    }

    /// Creates a reference to a [`PidNamespace`] from a valid pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` is valid and remains valid for the lifetime of the
    /// returned [`PidNamespace`] reference. Note that `'a` is chosen by the caller, so it is
    /// the caller's responsibility to pick a lifetime that does not outlive the pointee.
    pub unsafe fn from_ptr<'a>(ptr: *const bindings::pid_namespace) -> &'a Self {
        // SAFETY: The safety requirements guarantee the validity of the dereference, while the
        // `PidNamespace` type being transparent makes the cast ok.
        unsafe { &*ptr.cast() }
    }
}
// SAFETY: Instances of `PidNamespace` are always reference-counted.
unsafe impl AlwaysRefCounted for PidNamespace {
    #[inline]
    fn inc_ref(&self) {
        // SAFETY: The existence of a shared reference means that the refcount is nonzero.
        unsafe { bindings::get_pid_ns(self.as_ptr()) };
    }

    #[inline]
    unsafe fn dec_ref(obj: ptr::NonNull<PidNamespace>) {
        // SAFETY: The safety requirements guarantee that the refcount is non-zero.
        // `put_pid_ns()` consumes the caller's reference; `obj` must not be used afterwards.
        unsafe { bindings::put_pid_ns(obj.cast().as_ptr()) }
    }
}
// SAFETY:
// - `PidNamespace::dec_ref` can be called from any thread.
// - It is okay to send ownership of `PidNamespace` across thread boundaries.
unsafe impl Send for PidNamespace {}
// SAFETY: It's OK to access `PidNamespace` through shared references from other threads because
// we're either accessing properties that don't change or that are properly synchronised by C code.
unsafe impl Sync for PidNamespace {}

View File

@@ -8,7 +8,8 @@ use crate::{
bindings,
ffi::{c_int, c_long, c_uint},
mm::MmWithUser,
types::{NotThreadSafe, Opaque},
pid_namespace::PidNamespace,
types::{ARef, NotThreadSafe, Opaque},
};
use core::{
cmp::{Eq, PartialEq},
@@ -238,11 +239,32 @@ impl Task {
unsafe { bindings::signal_pending(self.as_ptr()) != 0 }
}
/// Returns the given task's pid in the current pid namespace.
pub fn pid_in_current_ns(&self) -> Pid {
// SAFETY: It's valid to pass a null pointer as the namespace (defaults to current
// namespace). The task pointer is also valid.
unsafe { bindings::task_tgid_nr_ns(self.as_ptr(), ptr::null_mut()) }
/// Returns task's pid namespace with elevated reference count
pub fn get_pid_ns(&self) -> Option<ARef<PidNamespace>> {
// SAFETY: By the type invariant, we know that `self.0` is valid.
let ptr = unsafe { bindings::task_get_pid_ns(self.0.get()) };
if ptr.is_null() {
None
} else {
// SAFETY: `ptr` is valid by the safety requirements of this function. And we own a
// reference count via `task_get_pid_ns()`.
// CAST: `Self` is a `repr(transparent)` wrapper around `bindings::pid_namespace`.
Some(unsafe { ARef::from_raw(ptr::NonNull::new_unchecked(ptr.cast::<PidNamespace>())) })
}
}
/// Returns the given task's pid in the provided pid namespace.
#[doc(alias = "task_tgid_nr_ns")]
pub fn tgid_nr_ns(&self, pidns: Option<&PidNamespace>) -> Pid {
let pidns = match pidns {
Some(pidns) => pidns.as_ptr(),
None => core::ptr::null_mut(),
};
// SAFETY: By the type invariant, we know that `self.0` is valid. We received a valid
// PidNamespace that we can use as a pointer or we received an empty PidNamespace and
// thus pass a null pointer. The underlying C function is safe to be used with NULL
// pointers.
unsafe { bindings::task_tgid_nr_ns(self.0.get(), pidns) }
}
/// Wakes up the task.
@@ -347,6 +369,38 @@ impl CurrentTask {
// scope is ended with `kthread_unuse_mm()`.
Some(unsafe { MmWithUser::from_raw(mm) })
}
/// Access the pid namespace of the current task.
///
/// This function does not touch the refcount of the namespace or use RCU protection.
///
/// To access the pid namespace of another task, see [`Task::get_pid_ns`].
#[doc(alias = "task_active_pid_ns")]
#[inline]
pub fn active_pid_ns(&self) -> Option<&PidNamespace> {
// SAFETY: It is safe to call `task_active_pid_ns` without RCU protection when calling it
// on the current task.
let active_ns = unsafe { bindings::task_active_pid_ns(self.as_ptr()) };
if active_ns.is_null() {
return None;
}
// The lifetime of `PidNamespace` is bound to `Task` and `struct pid`.
//
// The `PidNamespace` of a `Task` doesn't ever change once the `Task` is alive.
//
// From system call context retrieving the `PidNamespace` for the current task is always
// safe and requires neither RCU locking nor a reference count to be held. Retrieving the
// `PidNamespace` after `release_task()` for current will return `NULL` but no codepath
// like that is exposed to Rust.
//
// SAFETY: If `current`'s pid ns is non-null, then it references a valid pid ns.
// Furthermore, the returned `&PidNamespace` borrows from this `CurrentTask`, so it cannot
// escape the scope in which the current pointer was obtained, e.g. it cannot live past a
// `release_task()` call.
Some(unsafe { PidNamespace::from_ptr(active_ns) })
}
}
// SAFETY: The type invariants guarantee that `Task` is always refcounted.

View File

@@ -5985,6 +5985,13 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
return r < 0 ? r : 0;
}
/* SRCU callback: frees the replaced kvm_io_bus once all readers have drained. */
static void __free_bus(struct rcu_head *rcu)
{
	struct kvm_io_bus *bus = container_of(rcu, struct kvm_io_bus, rcu);
	kfree(bus);
}
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev)
{
@@ -6023,8 +6030,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
memcpy(new_bus->range + i + 1, bus->range + i,
(bus->dev_count - i) * sizeof(struct kvm_io_range));
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
call_srcu(&kvm->srcu, &bus->rcu, __free_bus);
return 0;
}