ANDROID: rust_binder: add Process::lock_with_nodes()

Since Node uses a LockedBy<ProcessInner> for its protected data, we need
a `&mut ProcessInner` to access the node. However, if we are iterating
over the `process.nodes` rbtree, then we cannot give out exclusive
access to all of `process`, as the `nodes` field is in use. To handle
this, we temporarily move the `nodes` field to a local variable (during
which `process.nodes` must not be accessed) and iterate over the local
variable instead.

Add an abstraction to make the above easier, and remove the possibility
of forgetting to put the `nodes` field back into `process` when
releasing the lock.

Bug: 423900220
Change-Id: I50139b422f6973afea4a23c496b04ffc70a956ca
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
This commit is contained in:
Alice Ryhl
2025-06-16 10:41:56 +00:00
parent d9be16a90c
commit df79f04f71
2 changed files with 37 additions and 21 deletions

View File

@@ -18,8 +18,6 @@ use crate::{
DTRWrap, DeliverToRead,
};
use core::mem;
/// Opaque `u64` identifier used as a cookie for freeze notifications.
///
/// NOTE(review): exact semantics (presumably a userspace-supplied cookie echoed
/// back in freeze notifications) are not visible in this hunk — confirm against
/// the freeze-notification code that constructs and compares these.
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
pub(crate) struct FreezeCookie(u64);
@@ -270,25 +268,22 @@ impl Process {
let mut recipients =
KVVec::with_capacity(8, GFP_KERNEL).unwrap_or_else(|_err| KVVec::new());
let mut inner = self.inner.lock();
let mut nodes = mem::take(&mut inner.nodes);
let mut curr = nodes.cursor_front();
let mut inner = self.lock_with_nodes();
let mut curr = inner.nodes.cursor_front();
while let Some(cursor) = curr {
let (key, node) = cursor.current();
let key = *key;
let list = node.freeze_list(&inner);
let list = node.freeze_list(&inner.inner);
let len = list.len();
if recipients.spare_capacity_mut().len() < len {
inner.nodes = nodes;
drop(inner);
recipients.reserve(len, GFP_KERNEL)?;
inner = self.inner.lock();
nodes = mem::take(&mut inner.nodes);
inner = self.lock_with_nodes();
// Find the node we were looking at and try again. If the set of nodes was changed,
// then just proceed to the next node. This is ok because we don't guarantee the
// inclusion of nodes that are added or removed in parallel with this operation.
curr = nodes.cursor_lower_bound(&key);
curr = inner.nodes.cursor_lower_bound(&key);
continue;
}
@@ -306,7 +301,6 @@ impl Process {
curr = cursor.move_next();
}
inner.nodes = nodes;
Ok(recipients)
}

View File

@@ -27,7 +27,8 @@ use kernel::{
seq_print,
sync::poll::PollTable,
sync::{
lock::Guard, Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
lock::{spinlock::SpinLockBackend, Guard},
Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
},
task::Task,
types::{ARef, Either},
@@ -1246,6 +1247,18 @@ impl Process {
}
}
/// Locks the spinlock and moves the `nodes` rbtree out.
///
/// This allows you to iterate through `nodes` while also allowing you to give other parts of
/// the codebase exclusive access to `ProcessInner`.
///
/// The tree is returned to `ProcessInner` when the `WithNodes` guard is dropped; until then,
/// `ProcessInner::nodes` is left empty and must not be accessed directly.
pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
    // Take the lock first, then steal the tree out from under the guard.
    let mut guard = self.inner.lock();
    let nodes = take(&mut guard.nodes);
    WithNodes { inner: guard, nodes }
}
fn deferred_flush(&self) {
let inner = self.inner.lock();
for thread in inner.threads.values() {
@@ -1274,12 +1287,10 @@ impl Process {
// Move oneway_todo into the process todolist.
{
let mut inner = self.inner.lock();
let nodes = take(&mut inner.nodes);
for node in nodes.values() {
node.release(&mut inner);
let mut inner = self.lock_with_nodes();
for node in inner.nodes.values() {
node.release(&mut inner.inner);
}
inner.nodes = nodes;
}
// Cancel all pending work items.
@@ -1642,10 +1653,7 @@ pub(crate) struct Registration<'a> {
}
impl<'a> Registration<'a> {
fn new(
thread: &'a Arc<Thread>,
guard: &mut Guard<'_, ProcessInner, kernel::sync::lock::spinlock::SpinLockBackend>,
) -> Self {
fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
// INVARIANT: We are pushing this thread to the right `ready_threads` list.
if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
@@ -1669,3 +1677,17 @@ impl Drop for Registration<'_> {
unsafe { inner.ready_threads.remove(self.thread) };
}
}
/// Guard returned by `Process::lock_with_nodes`.
///
/// Holds the process spinlock guard (`inner`) together with the `nodes` rbtree that was
/// moved out of `ProcessInner` while the guard is live. The `Drop` implementation swaps
/// the tree back into `ProcessInner`, so callers cannot forget to restore it.
pub(crate) struct WithNodes<'a> {
    // The locked `ProcessInner`, whose `nodes` field is empty while this guard exists.
    pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
    // The tree moved out of `ProcessInner::nodes`; iterate this instead of `inner.nodes`.
    pub(crate) nodes: RBTree<u64, DArc<Node>>,
}
impl Drop for WithNodes<'_> {
    fn drop(&mut self) {
        // Put the `nodes` tree back into `ProcessInner` while the spinlock guard in
        // `self.inner` is still held.
        core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
        // After the swap, `self.nodes` holds whatever was stored into
        // `ProcessInner::nodes` while the tree was moved out. It should be empty;
        // any entries found here indicate a bug (code inserted into the process's
        // `nodes` field directly while this guard was live), and those entries are
        // dropped when `self.nodes` is destroyed at the end of this function.
        if self.nodes.iter().next().is_some() {
            pr_err!("nodes array was modified while using lock_with_nodes\n");
        }
    }
}