ANDROID: rust_binder: fixups for 6.12.19 LTS

The 6.12.19 LTS release of the kernel contains some changes to the Rust kernel
crate involving the integer types used for FFI and the removal of the
alloc crate in favor of custom alloc utilities.

Bug: 388786466
Change-Id: Ie4cab650deaf1f2ffc4ee64d367e52f0671791c1
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
This commit is contained in:
Alice Ryhl
2025-04-16 12:49:03 +00:00
committed by Matthew Maurer
parent bf40001347
commit 7163533526
13 changed files with 96 additions and 126 deletions

View File

@@ -201,9 +201,9 @@ impl Allocation {
let files = core::mem::take(&mut file_list.files_to_translate);
let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
let mut close_on_free = Vec::with_capacity(num_close_on_free, GFP_KERNEL)?;
let mut close_on_free = KVec::with_capacity(num_close_on_free, GFP_KERNEL)?;
let mut reservations = Vec::with_capacity(files.len(), GFP_KERNEL)?;
let mut reservations = KVec::with_capacity(files.len(), GFP_KERNEL)?;
for file_info in files {
let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
let fd = res.reserved_fd();
@@ -566,8 +566,8 @@ impl BinderObject {
#[derive(Default)]
struct FileList {
files_to_translate: Vec<FileEntry>,
close_on_free: Vec<u32>,
files_to_translate: KVec<FileEntry>,
close_on_free: KVec<u32>,
}
struct FileEntry {
@@ -580,7 +580,7 @@ struct FileEntry {
}
pub(crate) struct TranslatedFds {
reservations: Vec<Reservation>,
reservations: KVec<Reservation>,
/// If commit is called, then these fds should be closed. (If commit is not called, then they
/// shouldn't be closed.)
close_on_free: FdsCloseOnFree,
@@ -594,8 +594,8 @@ struct Reservation {
impl TranslatedFds {
pub(crate) fn new() -> Self {
Self {
reservations: Vec::new(),
close_on_free: FdsCloseOnFree(Vec::new()),
reservations: KVec::new(),
close_on_free: FdsCloseOnFree(KVec::new()),
}
}
@@ -608,4 +608,4 @@ impl TranslatedFds {
}
}
pub(crate) struct FdsCloseOnFree(Vec<u32>);
pub(crate) struct FdsCloseOnFree(KVec<u32>);

View File

@@ -25,12 +25,12 @@ pub(crate) struct ContextList {
list: List<Context>,
}
pub(crate) fn get_all_contexts() -> Result<Vec<Arc<Context>>> {
pub(crate) fn get_all_contexts() -> Result<KVec<Arc<Context>>> {
let lock = CONTEXTS.lock();
let count = lock.list.iter().count();
let mut ctxs = Vec::with_capacity(count, GFP_KERNEL)?;
let mut ctxs = KVec::with_capacity(count, GFP_KERNEL)?;
for ctx in &lock.list {
ctxs.push(Arc::from(ctx), GFP_KERNEL)?;
}
@@ -161,20 +161,23 @@ impl Context {
}
}
pub(crate) fn get_all_procs(&self) -> Result<Vec<Arc<Process>>> {
pub(crate) fn get_all_procs(&self) -> Result<KVec<Arc<Process>>> {
let lock = self.manager.lock();
let count = lock.all_procs.iter().count();
let mut procs = Vec::with_capacity(count, GFP_KERNEL)?;
let mut procs = KVec::with_capacity(count, GFP_KERNEL)?;
for proc in &lock.all_procs {
procs.push(Arc::from(proc), GFP_KERNEL)?;
}
Ok(procs)
}
pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<Vec<Arc<Process>>> {
let mut procs = self.get_all_procs()?;
procs.retain(|proc| proc.task.pid() == pid);
Ok(procs)
pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>> {
let orig = self.get_all_procs()?;
let mut backing = KVec::with_capacity(orig.len(), GFP_KERNEL)?;
for proc in orig.into_iter().filter(|proc| proc.task.pid() == pid) {
backing.push(proc, GFP_KERNEL)?;
}
Ok(backing)
}
}

View File

@@ -20,7 +20,7 @@ use kernel::{
/// Additional motivation can be found in commit 80cd795630d6 ("binder: fix use-after-free due to
/// ksys_close() during fdget()") and in the comments on `binder_do_fd_close`.
pub(crate) struct DeferredFdCloser {
inner: Box<DeferredFdCloserInner>,
inner: KBox<DeferredFdCloserInner>,
}
/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
@@ -43,7 +43,7 @@ impl DeferredFdCloser {
pub(crate) fn new(flags: Flags) -> Result<Self, AllocError> {
Ok(Self {
// INVARIANT: The `file` pointer is null, so the type invariant does not apply.
inner: Box::new(
inner: KBox::new(
DeferredFdCloserInner {
twork: MaybeUninit::uninit(),
file: core::ptr::null_mut(),
@@ -74,11 +74,11 @@ impl DeferredFdCloser {
}
// Transfer ownership of the box's allocation to a raw pointer. This disables the
// destructor, so we must manually convert it back to a Box to drop it.
// destructor, so we must manually convert it back to a KBox to drop it.
//
// Until we convert it back to a `Box`, there are no aliasing requirements on this
// Until we convert it back to a `KBox`, there are no aliasing requirements on this
// pointer.
let inner = Box::into_raw(self.inner);
let inner = KBox::into_raw(self.inner);
// The `callback_head` field is first in the struct, so this cast correctly gives us a
// pointer to the field.
@@ -110,7 +110,7 @@ impl DeferredFdCloser {
if res != 0 {
// SAFETY: Scheduling the task work failed, so we still have ownership of the box, so
// we may destroy it.
unsafe { drop(Box::from_raw(inner)) };
unsafe { drop(KBox::from_raw(inner)) };
return Err(DeferredFdCloseError::TaskWorkUnavailable);
}
@@ -169,11 +169,11 @@ impl DeferredFdCloser {
/// # Safety
///
/// The provided pointer must point at the `twork` field of a `DeferredFdCloserInner` stored in
/// a `Box`, and the caller must pass exclusive ownership of that `Box`. Furthermore, if the
/// a `KBox`, and the caller must pass exclusive ownership of that `KBox`. Furthermore, if the
/// file pointer is non-null, then it must be okay to release the refcount by calling `fput`.
unsafe extern "C" fn do_close_fd(inner: *mut bindings::callback_head) {
// SAFETY: The caller just passed us ownership of this box.
let inner = unsafe { Box::from_raw(inner.cast::<DeferredFdCloserInner>()) };
let inner = unsafe { KBox::from_raw(inner.cast::<DeferredFdCloserInner>()) };
if !inner.file.is_null() {
// SAFETY: By the type invariants, we own a refcount to this file, and the caller
// guarantees that dropping the refcount now is okay.

View File

@@ -41,7 +41,7 @@ impl BinderError {
self.reply == BR_DEAD_REPLY
}
pub(crate) fn as_errno(&self) -> core::ffi::c_int {
pub(crate) fn as_errno(&self) -> kernel::ffi::c_int {
self.source.unwrap_or(EINVAL).to_errno()
}

View File

@@ -15,15 +15,17 @@
use core::{
alloc::Layout,
ffi::{c_ulong, c_void},
marker::PhantomPinned,
mem::{size_of, size_of_val, MaybeUninit},
ptr,
ptr::{self, NonNull},
};
use kernel::{
alloc::allocator::Kmalloc,
alloc::Allocator,
bindings,
error::Result,
ffi::{c_ulong, c_void},
mm::{virt, Mm, MmWithUser},
new_mutex, new_spinlock,
page::{Page, PAGE_SHIFT, PAGE_SIZE},
@@ -294,16 +296,13 @@ impl ShrinkablePageRange {
let layout = Layout::array::<PageInfo>(num_pages).map_err(|_| ENOMEM)?;
// SAFETY: The layout has non-zero size.
let pages = unsafe { alloc::alloc::alloc(layout) as *mut PageInfo };
if pages.is_null() {
return Err(ENOMEM);
}
let pages = Kmalloc::alloc(layout, GFP_KERNEL)?.cast::<PageInfo>();
// SAFETY: This just initializes the pages array.
unsafe {
let self_ptr = self as *const ShrinkablePageRange;
for i in 0..num_pages {
let info = pages.add(i);
let info = pages.add(i).as_ptr();
ptr::addr_of_mut!((*info).range).write(self_ptr);
ptr::addr_of_mut!((*info).page).write(None);
let lru = ptr::addr_of_mut!((*info).lru);
@@ -317,11 +316,11 @@ impl ShrinkablePageRange {
pr_debug!("Failed to register with vma: already registered");
drop(inner);
// SAFETY: The `pages` array was allocated with the same layout.
unsafe { alloc::alloc::dealloc(pages.cast(), layout) };
unsafe { Kmalloc::free(pages.cast(), layout) };
return Err(EBUSY);
}
inner.pages = pages;
inner.pages = pages.as_ptr();
inner.size = num_pages;
inner.vma_addr = vma.start();
@@ -638,12 +637,16 @@ impl PinnedDrop for ShrinkablePageRange {
// `stable_trylock_mm`.
drop(self.mm_lock.lock());
let Some(pages) = NonNull::new(pages) else {
return;
};
// SAFETY: This computation did not overflow when allocating the pages array, so it will
// not overflow this time.
let layout = unsafe { Layout::array::<PageInfo>(size).unwrap_unchecked() };
// SAFETY: The `pages` array was allocated with the same layout.
unsafe { alloc::alloc::dealloc(pages.cast(), layout) };
unsafe { Kmalloc::free(pages.cast(), layout) };
}
}
@@ -676,7 +679,7 @@ unsafe extern "C" fn rust_shrink_scan(
item: *mut bindings::list_head,
list: *mut bindings::list_lru_one,
lock: *mut bindings::spinlock_t,
cb_arg: *mut core::ffi::c_void,
cb_arg: *mut kernel::ffi::c_void,
) -> bindings::lru_status;
}

View File

@@ -6,9 +6,9 @@
use kernel::bindings;
pub(crate) type Policy = core::ffi::c_uint;
pub(crate) type Priority = core::ffi::c_int;
pub(crate) type Nice = core::ffi::c_int;
pub(crate) type Policy = kernel::ffi::c_uint;
pub(crate) type Priority = kernel::ffi::c_int;
pub(crate) type Nice = kernel::ffi::c_int;
pub(crate) const SCHED_NORMAL: Policy = bindings::SCHED_NORMAL;
pub(crate) const SCHED_FIFO: Policy = bindings::SCHED_FIFO;

View File

@@ -549,8 +549,8 @@ impl Process {
seq_print!(m, "proc {}\n", self.task.pid_in_current_ns());
seq_print!(m, "context {}\n", &*ctx.name);
let mut all_threads = Vec::new();
let mut all_nodes = Vec::new();
let mut all_threads = KVec::new();
let mut all_nodes = KVec::new();
loop {
let inner = self.inner.lock();
let num_threads = inner.threads.iter().count();
@@ -1434,7 +1434,7 @@ fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
// Very unlikely for there to be more than 3, since a process normally uses at most binder and
// hwbinder.
let mut procs = Vec::with_capacity(3, GFP_KERNEL)?;
let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
let ctxs = crate::context::get_all_contexts()?;
for ctx in ctxs {

View File

@@ -22,7 +22,7 @@ pub(super) struct ArrayRangeAllocator<T> {
/// store the free ranges.
///
/// Sorted by offset.
pub(super) ranges: Vec<Range<T>>,
pub(super) ranges: KVec<Range<T>>,
size: usize,
free_oneway_space: usize,
}
@@ -141,7 +141,7 @@ impl<T> ArrayRangeAllocator<T> {
state: DescriptorState::new(is_oneway, debug_id, pid),
};
// Insert the value at the given index to keep the array sorted.
insert_within_capacity(&mut self.ranges, insert_at_idx, new_range);
self.ranges.insert_within_capacity(insert_at_idx, new_range).ok().unwrap();
Ok(insert_at_offset)
}
@@ -182,7 +182,7 @@ impl<T> ArrayRangeAllocator<T> {
}
}
self.ranges.remove(i);
self.ranges.remove(i)?;
Ok(freed_range)
}
@@ -235,48 +235,13 @@ impl<T> ArrayRangeAllocator<T> {
}
pub(crate) struct EmptyArrayAlloc<T> {
ranges: Vec<Range<T>>,
ranges: KVec<Range<T>>,
}
impl<T> EmptyArrayAlloc<T> {
pub(crate) fn try_new(capacity: usize) -> Result<Self> {
Ok(Self {
ranges: Vec::with_capacity(capacity, GFP_KERNEL)?,
ranges: KVec::with_capacity(capacity, GFP_KERNEL)?,
})
}
}
/// Copied from `rust/alloc/vec/mod.rs` with allocation codepath removed.
///
/// TODO: Either add this to the standard library (like [`push_within_capacity`]) or move it to the
/// kernel crate once [the updated allocation APIs][alloc] are available.
///
/// [`push_within_capacity`]: https://github.com/rust-lang/rust/issues/100486
/// [alloc]: https://lore.kernel.org/r/20240328013603.206764-1-wedsonaf@gmail.com
fn insert_within_capacity<T>(vec: &mut Vec<T>, index: usize, element: T) {
let len = vec.len();
if len == vec.capacity() {
panic!("no capacity to insert");
}
unsafe {
// The spot to put the new value
{
let p = vec.as_mut_ptr().add(index);
if index < len {
// Shift everything over to make space. (Duplicating the
// `index`th element into two consecutive places.)
core::ptr::copy(p, p.add(1), len - index);
} else if index == len {
// No elements need shifting.
} else {
panic!("insertion index (is {index}) should be <= len (is {len})");
}
// Write it in, overwriting the first copy of the `index`th
// element.
core::ptr::write(p, element);
}
vec.set_len(len + 1);
}
}

View File

@@ -33,7 +33,7 @@ pub(super) struct TreeRangeAllocator<T> {
impl<T> TreeRangeAllocator<T> {
pub(crate) fn from_array(
size: usize,
ranges: &mut Vec<Range<T>>,
ranges: &mut KVec<Range<T>>,
alloc: &mut FromArrayAllocs<T>,
) -> Self {
let mut tree = TreeRangeAllocator {
@@ -44,7 +44,7 @@ impl<T> TreeRangeAllocator<T> {
};
let mut free_offset = 0;
for range in ranges.drain(..) {
for range in ranges.drain_all() {
let free_size = range.offset - free_offset;
if free_size > 0 {
let free_node = alloc.free_tree.pop().unwrap();
@@ -477,20 +477,20 @@ impl<T> ReserveNewTreeAlloc<T> {
/// An allocation for creating a tree from an `ArrayRangeAllocator`.
pub(crate) struct FromArrayAllocs<T> {
tree: Vec<RBTreeNodeReservation<usize, Descriptor<T>>>,
free_tree: Vec<RBTreeNodeReservation<FreeKey, ()>>,
tree: KVec<RBTreeNodeReservation<usize, Descriptor<T>>>,
free_tree: KVec<RBTreeNodeReservation<FreeKey, ()>>,
}
impl<T> FromArrayAllocs<T> {
pub(crate) fn try_new(len: usize) -> Result<Self> {
let num_descriptors = 2 * len + 1;
let mut tree = Vec::with_capacity(num_descriptors, GFP_KERNEL)?;
let mut tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
for _ in 0..num_descriptors {
tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
}
let mut free_tree = Vec::with_capacity(num_descriptors, GFP_KERNEL)?;
let mut free_tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
for _ in 0..num_descriptors {
free_tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
}

View File

@@ -46,22 +46,22 @@ mod binderfs {
use kernel::bindings::{dentry, inode};
extern "C" {
pub fn init_rust_binderfs() -> core::ffi::c_int;
pub fn init_rust_binderfs() -> kernel::ffi::c_int;
}
extern "C" {
pub fn rust_binderfs_create_proc_file(
nodp: *mut inode,
pid: core::ffi::c_int,
pid: kernel::ffi::c_int,
) -> *mut dentry;
}
extern "C" {
pub fn rust_binderfs_remove_file(dentry: *mut dentry);
}
pub type rust_binder_context = *mut core::ffi::c_void;
pub type rust_binder_context = *mut kernel::ffi::c_void;
#[repr(C)]
#[derive(Copy, Clone)]
pub struct binder_device {
pub minor: core::ffi::c_int,
pub minor: kernel::ffi::c_int,
pub ctx: rust_binder_context,
}
impl Default for binder_device {
@@ -352,8 +352,8 @@ pub static rust_binder_fops: AssertSync<kernel::bindings::file_operations> = {
#[no_mangle]
unsafe extern "C" fn rust_binder_new_context(
name: *const core::ffi::c_char,
) -> *mut core::ffi::c_void {
name: *const kernel::ffi::c_char,
) -> *mut kernel::ffi::c_void {
// SAFETY: The caller will always provide a valid c string here.
let name = unsafe { kernel::str::CStr::from_char_ptr(name) };
match Context::new(name) {
@@ -363,7 +363,7 @@ unsafe extern "C" fn rust_binder_new_context(
}
#[no_mangle]
unsafe extern "C" fn rust_binder_remove_context(device: *mut core::ffi::c_void) {
unsafe extern "C" fn rust_binder_remove_context(device: *mut kernel::ffi::c_void) {
if !device.is_null() {
// SAFETY: The caller ensures that the `device` pointer came from a previous call to
// `rust_binder_new_device`.
@@ -376,7 +376,7 @@ unsafe extern "C" fn rust_binder_remove_context(device: *mut core::ffi::c_void)
unsafe extern "C" fn rust_binder_open(
inode: *mut bindings::inode,
file_ptr: *mut bindings::file,
) -> core::ffi::c_int {
) -> kernel::ffi::c_int {
// SAFETY: The `rust_binderfs.c` file ensures that `i_private` is set to a
// `struct binder_device`.
let device = unsafe { (*inode).i_private } as *const binderfs::binder_device;
@@ -409,7 +409,7 @@ unsafe extern "C" fn rust_binder_open(
unsafe extern "C" fn rust_binder_release(
_inode: *mut bindings::inode,
file: *mut bindings::file,
) -> core::ffi::c_int {
) -> kernel::ffi::c_int {
// SAFETY: We previously set `private_data` in `rust_binder_open`.
let process = unsafe { Arc::<Process>::from_foreign((*file).private_data) };
// SAFETY: The caller ensures that the file is valid.
@@ -420,36 +420,36 @@ unsafe extern "C" fn rust_binder_release(
unsafe extern "C" fn rust_binder_compat_ioctl(
file: *mut bindings::file,
cmd: core::ffi::c_uint,
arg: core::ffi::c_ulong,
) -> core::ffi::c_long {
cmd: kernel::ffi::c_uint,
arg: kernel::ffi::c_ulong,
) -> kernel::ffi::c_long {
// SAFETY: We previously set `private_data` in `rust_binder_open`.
let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
// SAFETY: The caller ensures that the file is valid.
match Process::compat_ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
Ok(()) => 0,
Err(err) => err.to_errno().into(),
Err(err) => err.to_errno() as isize,
}
}
unsafe extern "C" fn rust_binder_unlocked_ioctl(
file: *mut bindings::file,
cmd: core::ffi::c_uint,
arg: core::ffi::c_ulong,
) -> core::ffi::c_long {
cmd: kernel::ffi::c_uint,
arg: kernel::ffi::c_ulong,
) -> kernel::ffi::c_long {
// SAFETY: We previously set `private_data` in `rust_binder_open`.
let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
// SAFETY: The caller ensures that the file is valid.
match Process::ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
Ok(()) => 0,
Err(err) => err.to_errno().into(),
Err(err) => err.to_errno() as isize,
}
}
unsafe extern "C" fn rust_binder_mmap(
file: *mut bindings::file,
vma: *mut bindings::vm_area_struct,
) -> core::ffi::c_int {
) -> kernel::ffi::c_int {
// SAFETY: We previously set `private_data` in `rust_binder_open`.
let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
// SAFETY: The caller ensures that the vma is valid.
@@ -479,7 +479,7 @@ unsafe extern "C" fn rust_binder_poll(
unsafe extern "C" fn rust_binder_flush(
file: *mut bindings::file,
_id: bindings::fl_owner_t,
) -> core::ffi::c_int {
) -> kernel::ffi::c_int {
// SAFETY: We previously set `private_data` in `rust_binder_open`.
let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
match Process::flush(f) {
@@ -491,8 +491,8 @@ unsafe extern "C" fn rust_binder_flush(
#[no_mangle]
unsafe extern "C" fn rust_binder_stats_show(
ptr: *mut seq_file,
_: *mut core::ffi::c_void,
) -> core::ffi::c_int {
_: *mut kernel::ffi::c_void,
) -> kernel::ffi::c_int {
// SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
// this method is called.
let m = unsafe { SeqFile::from_raw(ptr) };
@@ -505,8 +505,8 @@ unsafe extern "C" fn rust_binder_stats_show(
#[no_mangle]
unsafe extern "C" fn rust_binder_state_show(
ptr: *mut seq_file,
_: *mut core::ffi::c_void,
) -> core::ffi::c_int {
_: *mut kernel::ffi::c_void,
) -> kernel::ffi::c_int {
// SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
// this method is called.
let m = unsafe { SeqFile::from_raw(ptr) };
@@ -519,8 +519,8 @@ unsafe extern "C" fn rust_binder_state_show(
#[no_mangle]
unsafe extern "C" fn rust_binder_proc_show(
ptr: *mut seq_file,
_: *mut core::ffi::c_void,
) -> core::ffi::c_int {
_: *mut kernel::ffi::c_void,
) -> kernel::ffi::c_int {
// SAFETY: Accessing the private field of `seq_file` is okay.
let pid = (unsafe { (*ptr).private }) as usize as Pid;
// SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
@@ -535,8 +535,8 @@ unsafe extern "C" fn rust_binder_proc_show(
#[no_mangle]
unsafe extern "C" fn rust_binder_transactions_show(
ptr: *mut seq_file,
_: *mut core::ffi::c_void,
) -> core::ffi::c_int {
_: *mut kernel::ffi::c_void,
) -> kernel::ffi::c_int {
// SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
// this method is called.
let m = unsafe { SeqFile::from_raw(ptr) };

View File

@@ -63,8 +63,8 @@ mod strings {
use kernel::str::CStr;
extern "C" {
static binder_command_strings: [*const core::ffi::c_char; super::BC_COUNT];
static binder_return_strings: [*const core::ffi::c_char; super::BR_COUNT];
static binder_command_strings: [*const u8; super::BC_COUNT];
static binder_return_strings: [*const u8; super::BR_COUNT];
}
pub(super) fn command_string(i: usize) -> &'static str {

View File

@@ -46,10 +46,10 @@ struct ScatterGatherState {
/// A struct that tracks the amount of unused buffer space.
unused_buffer_space: UnusedBufferSpace,
/// Scatter-gather entries to copy.
sg_entries: Vec<ScatterGatherEntry>,
sg_entries: KVec<ScatterGatherEntry>,
/// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
/// was processed and all of its ancestors. The array is in sorted order.
ancestors: Vec<usize>,
ancestors: KVec<usize>,
}
/// This entry specifies an additional buffer that should be copied using the scatter-gather
@@ -66,7 +66,7 @@ struct ScatterGatherEntry {
/// The minimum offset of the next fixup in this buffer.
fixup_min_offset: usize,
/// The offsets within this buffer that contain pointers which should be translated.
pointer_fixups: Vec<PointerFixupEntry>,
pointer_fixups: KVec<PointerFixupEntry>,
}
/// This entry specifies that a fixup should happen at `target_offset` of the
@@ -843,7 +843,7 @@ impl Thread {
offset: alloc_offset,
sender_uaddr: obj.buffer as _,
length: obj_length,
pointer_fixups: Vec::new(),
pointer_fixups: KVec::new(),
fixup_min_offset: 0,
},
GFP_KERNEL,
@@ -941,7 +941,7 @@ impl Thread {
.sender_uaddr
.checked_add(parent_offset)
.ok_or(EINVAL)?;
let mut fda_bytes = Vec::new();
let mut fda_bytes = KVec::new();
UserSlice::new(fda_uaddr as _, fds_len).read_all(&mut fda_bytes, GFP_KERNEL)?;
if fds_len != fda_bytes.len() {
@@ -1125,8 +1125,8 @@ impl Thread {
offset: offsets_end,
limit: len,
},
sg_entries: Vec::new(),
ancestors: Vec::new(),
sg_entries: KVec::new(),
ancestors: KVec::new(),
});
// Traverse the objects specified.

View File

@@ -9,12 +9,11 @@ use kernel::bindings::{
rust_binder_transaction, task_struct,
};
use kernel::error::Result;
use kernel::ffi::{c_int, c_uint, c_ulong};
use kernel::task::{Pid, Task};
use kernel::tracepoint::declare_trace;
use kernel::uapi;
use core::ffi::{c_int, c_uint, c_ulong};
declare_trace! {
unsafe fn rust_binder_ioctl(cmd: c_uint, arg: c_ulong);
unsafe fn rust_binder_ioctl_done(ret: c_int);