Merge branch 'android16-6.12' into android16-6.12-lts

Catch the -lts branch up with the recent KABI update to ensure that
both branches are working off of the same stable abi signatures.

Changes merged in here are:

* 6cf7b79fed ANDROID: 2025/04/30 KMI update
* 4af53766a2 ANDROID: gki_defconfig: Enable CONFIG_CFS_BANDWIDTH
* 563c4f2322 ANDROID: mm: add a new vendor hook in filemap_map_pages
* 0c380917c7 ANDROID: GKI: Enable CONFIG_READ_ONLY_THP_FOR_FS
* ef7b72aac9 ANDROID: GKI: Update symbol list for vivo
* 8529ec391e ANDROID: vendor_hooks: add hook when adding or deleting folios
* 3ccf388466 UPSTREAM: PM: EM: Add min/max available performance state limits
* bc95a02e11 ANDROID: GKI: add OEM data to struct percpu_rw_semaphore
* 8e70bb6369 ANDROID: vendor_hooks: restore field in mem_cgroup
* b464fe7226 UPSTREAM: cgroup/rstat: Fix forceidle time in cpu.stat
* 0dcd826d33 UPSTREAM: cgroup/rstat: Tracking cgroup-level niced CPU time
* 871ecf0300 UPSTREAM: can: statistics: use atomic access in hot path
* 50817926fc UPSTREAM: RDMA/core: Don't expose hw_counters outside of init net namespace
* 9f801ea82c UPSTREAM: of: property: Increase NR_FWNODE_REFERENCE_ARGS
* f3f9ecfdc6 UPSTREAM: Bluetooth: L2CAP: Fix corrupted list in hci_chan_del
* 4a5496412e UPSTREAM: Revert "Bluetooth: hci_core: Fix sleeping function called from invalid context"
* e0f526b01b ANDROID: GKI: add more vfs-only exports to their own namespace
* 7c5c987c29 UPSTREAM: acpi: typec: ucsi: Introduce a ->poll_cci method
* cf4c97db94 UPSTREAM: drm/nouveau: select FW caching
* 47b596b4c5 UPSTREAM: drm/nouveau: Run DRM default client setup
* 0ec7636837 UPSTREAM: drm/fbdev-ttm: Support struct drm_driver.fbdev_probe
* 9df628f8b3 UPSTREAM: drm: Add client-agnostic setup helper
* a387aeb8b4 UPSTREAM: drm/fbdev: Add memory-agnostic fbdev client
* e3e9468688 ANDROID: update KABI macros for gendwarfksyms
* 70c54eefee ANDROID: rename KABI reserved field names
* 58b70f4a59 ANDROID: KVM: arm64: Exposes fixblock_map to pKVM modules
* 87dd5e4d38 ANDROID: KVM: iommu: Add flags to attach_dev
* 2ea2a15fa1 ANDROID: KVM: iommu: Abstract hypercalls
* 37dc91cf2d ANDROID: GKI: Update protected exports and symbols list for HCI Drv
* b5babbd250 FROMGIT: Revert "Bluetooth: btusb: Configure altsetting for HCI_USER_CHANNEL"
* 912521fc4e FROMGIT: Bluetooth: btusb: Add HCI Drv commands for configuring altsetting
* 99db8bf902 FROMGIT: Bluetooth: Introduce HCI Driver protocol
* cb0faf060f Revert "ANDROID: GKI: Enable CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT"
* be12dbb882 Revert "ANDROID: GKI: fix protected exports list"
* 8ffa03307c ANDROID: gunyah: Handle platform specific return value of vcpu_run
* f49bbfed7a ANDROID: ABI: qcom: Update qcom ABI for DMA

Change-Id: I82d4f38a8e4cf9423998fa64cd31efc20ebac2f8
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-04-30 16:44:57 +00:00
89 changed files with 9207 additions and 7910 deletions

View File

@@ -32,6 +32,7 @@ CONFIG_MEMCG=y
CONFIG_MEMCG_V1=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_UCLAMP_TASK_GROUP=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
@@ -128,6 +129,7 @@ CONFIG_MEMORY_HOTREMOVE=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
CONFIG_READ_ONLY_THP_FOR_FS=y
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
@@ -296,8 +298,6 @@ CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_HIDP=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT=y
CONFIG_BT_HCIBTSDIO=m
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_LL=y

View File

@@ -8,6 +8,7 @@
#include <linux/gunyah.h>
#include <linux/uuid.h>
#define GUNYAH_VCPU_RUN_STATE_PSCI_SYSTEM_RESET 256
/* {c1d58fcd-a453-5fdb-9265-ce36673d5f14} */
static const uuid_t GUNYAH_UUID = UUID_INIT(0xc1d58fcd, 0xa453, 0x5fdb, 0x92,
0x65, 0xce, 0x36, 0x67, 0x3d, 0x5f,
@@ -285,6 +286,14 @@ gunyah_hypercall_vcpu_run(u64 capid, unsigned long *resume_data,
resp->state_data[2] = res.a4;
}
/*
* PSCI_SYSTEM_RESET is also a state where VM is shutdown
* Translate it to GUNYAH_VCPU_STATE_SYSTEM_OFF as VMM will
* be able to take the action based on the exit_info.
*/
if (resp->sized_state == GUNYAH_VCPU_RUN_STATE_PSCI_SYSTEM_RESET)
resp->sized_state = GUNYAH_VCPU_STATE_SYSTEM_OFF;
return res.a0;
}
EXPORT_SYMBOL_GPL(gunyah_hypercall_vcpu_run);

View File

@@ -1763,6 +1763,26 @@ static inline void kvm_iommu_sg_free(struct kvm_iommu_sg *sg, unsigned int nents
free_pages_exact(sg, PAGE_ALIGN(nents * sizeof(struct kvm_iommu_sg)));
}
#ifndef __KVM_NVHE_HYPERVISOR__
int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
unsigned int endpoint, unsigned int pasid,
unsigned int ssid_bits, unsigned long flags);
int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
unsigned int endpoint, unsigned int pasid);
int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type);
int kvm_iommu_free_domain(pkvm_handle_t domain_id);
int kvm_iommu_map_pages(pkvm_handle_t domain_id, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *total_mapped);
size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
size_t pgsize, size_t pgcount);
phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova);
size_t kvm_iommu_map_sg(pkvm_handle_t domain_id, struct kvm_iommu_sg *sg,
unsigned long iova, unsigned int nent,
unsigned int prot, gfp_t gfp);
#endif
int kvm_iommu_share_hyp_sg(struct kvm_iommu_sg *sg, unsigned int nents);
int kvm_iommu_unshare_hyp_sg(struct kvm_iommu_sg *sg, unsigned int nents);
int kvm_iommu_device_num_ids(struct device *dev);

View File

@@ -51,6 +51,14 @@ enum pkvm_psci_notification {
* new mapping is visible.
* @fixmap_unmap: Unmap a page from the hypervisor fixmap. This
* call is required between each @fixmap_map().
* @fixblock_map: Map a PMD-size large page into a CPU-shared
* fixmap. This can be used to replace and speed-up
* a set of @fixmap_map. @fixblock_unmap must be
* called between each mappings to do cache
* maintenance and ensure the new mapping is visible.
* @fixblock_unmap: Unmap a PMD-size large page from the hypervisor
* fixmap. This call is required between each
* @fixblock_map.
* @linear_map_early: Map a large portion of memory into the
* hypervisor linear VA space. This is intended to
* be used only for module bootstrap and must be
@@ -197,6 +205,8 @@ struct pkvm_module_ops {
void (*putx64)(u64 x);
void *(*fixmap_map)(phys_addr_t phys);
void (*fixmap_unmap)(void);
void *(*fixblock_map)(phys_addr_t phys);
void (*fixblock_unmap)(void);
void *(*linear_map_early)(phys_addr_t phys, size_t size, enum kvm_pgtable_prot prot);
void (*linear_unmap_early)(void *addr, size_t size);
void (*flush_dcache_to_poc)(void *addr, size_t size);

View File

@@ -18,7 +18,8 @@ void kvm_iommu_reclaim_pages_atomic(void *p, u8 order);
int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type);
int kvm_iommu_free_domain(pkvm_handle_t domain_id);
int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
u32 endpoint_id, u32 pasid, u32 pasid_bits);
u32 endpoint_id, u32 pasid, u32 pasid_bits,
unsigned long flags);
int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
u32 endpoint_id, u32 pasid);
size_t kvm_iommu_map_pages(pkvm_handle_t domain_id,
@@ -56,7 +57,7 @@ struct kvm_iommu_ops {
void (*free_domain)(struct kvm_hyp_iommu_domain *domain);
struct kvm_hyp_iommu *(*get_iommu_by_id)(pkvm_handle_t iommu_id);
int (*attach_dev)(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_domain *domain,
u32 endpoint_id, u32 pasid, u32 pasid_bits);
u32 endpoint_id, u32 pasid, u32 pasid_bits, unsigned long flags);
int (*detach_dev)(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_domain *domain,
u32 endpoint_id, u32 pasid);
int (*map_pages)(struct kvm_hyp_iommu_domain *domain, unsigned long iova,

View File

@@ -1724,9 +1724,10 @@ static void handle___pkvm_host_iommu_attach_dev(struct kvm_cpu_context *host_ctx
DECLARE_REG(unsigned int, endpoint, host_ctxt, 3);
DECLARE_REG(unsigned int, pasid, host_ctxt, 4);
DECLARE_REG(unsigned int, pasid_bits, host_ctxt, 5);
DECLARE_REG(unsigned long, flags, host_ctxt, 6);
ret = kvm_iommu_attach_dev(iommu, domain, endpoint,
pasid, pasid_bits);
pasid, pasid_bits, flags);
hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
}

View File

@@ -387,7 +387,8 @@ int kvm_iommu_force_free_domain(pkvm_handle_t domain_id, struct pkvm_hyp_vm *vm)
}
int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
u32 endpoint_id, u32 pasid, u32 pasid_bits)
u32 endpoint_id, u32 pasid, u32 pasid_bits,
unsigned long flags)
{
int ret;
struct kvm_hyp_iommu *iommu;
@@ -417,7 +418,7 @@ int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
goto out_unlock;
}
ret = kvm_iommu_ops->attach_dev(iommu, domain, endpoint_id, pasid, pasid_bits);
ret = kvm_iommu_ops->attach_dev(iommu, domain, endpoint_id, pasid, pasid_bits, flags);
if (ret)
domain_put(domain);

View File

@@ -86,7 +86,7 @@ static bool pkvm_guest_iommu_attach_dev(struct pkvm_hyp_vcpu *hyp_vcpu, u64 *exi
iommu_id = route.iommu;
sid = route.sid;
ret = kvm_iommu_attach_dev(iommu_id, domain_id, sid, pasid, pasid_bits);
ret = kvm_iommu_attach_dev(iommu_id, domain_id, sid, pasid, pasid_bits, 0);
if (ret == -ENOMEM) {
/*
* The driver will request memory when returning -ENOMEM, so go back to host to

View File

@@ -216,6 +216,8 @@ const struct pkvm_module_ops module_ops = {
.putx64 = hyp_putx64,
.fixmap_map = hyp_fixmap_map,
.fixmap_unmap = hyp_fixmap_unmap,
.fixblock_map = hyp_fixblock_map,
.fixblock_unmap = hyp_fixblock_unmap,
.linear_map_early = __pkvm_linear_map_early,
.linear_unmap_early = __pkvm_linear_unmap_early,
.flush_dcache_to_poc = __kvm_flush_dcache_to_poc,

View File

@@ -5,11 +5,53 @@
*/
#include <asm/kvm_mmu.h>
#include <asm/kvm_pkvm.h>
#include <kvm/iommu.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#define kvm_call_hyp_nvhe_mc(...) \
({ \
struct arm_smccc_res __res; \
do { \
__res = kvm_call_hyp_nvhe_smccc(__VA_ARGS__); \
} while (__res.a1 && !kvm_iommu_topup_memcache(&__res, GFP_KERNEL));\
__res.a1; \
})
/*
 * kvm_iommu_topup_memcache() - Donate memory the hypervisor asked for.
 * @res: SMCCC result of a failed IOMMU hypercall; carries the encoded
 *       hypervisor request (decoded via hyp_reqs_smccc_decode()).
 * @gfp: Allocation flags to use when refilling the IOMMU allocator.
 *
 * Decodes the request piggy-backed on @res and tops up the matching
 * hypervisor allocator so that the caller can retry the hypercall.
 *
 * Returns 0 on success, -EBADE for a request this code cannot service,
 * or a negative errno from the underlying topup helpers.
 */
static int kvm_iommu_topup_memcache(struct arm_smccc_res *res, gfp_t gfp)
{
struct kvm_hyp_req req;

hyp_reqs_smccc_decode(res, &req);

if ((res->a1 == -ENOMEM) && (req.type != KVM_HYP_REQ_TYPE_MEM)) {
/*
 * There is no way for drivers to populate hyp_alloc requests,
 * so -ENOMEM + no request indicates that.
 */
return __pkvm_topup_hyp_alloc(1);
} else if (req.type != KVM_HYP_REQ_TYPE_MEM) {
/* Any other non-memory request cannot be serviced from here. */
return -EBADE;
}

if (req.mem.dest == REQ_MEM_DEST_HYP_IOMMU) {
/* Refill the dedicated hypervisor IOMMU page allocator. */
return __pkvm_topup_hyp_alloc_mgt_gfp(HYP_ALLOC_MGT_IOMMU_ID,
req.mem.nr_pages,
req.mem.sz_alloc,
gfp);
} else if (req.mem.dest == REQ_MEM_DEST_HYP_ALLOC) {
/* Fill the generic hyp allocator. */
return __pkvm_topup_hyp_alloc(req.mem.nr_pages);
}

pr_err("Bogus mem request");
return -EBADE;
}
struct kvm_iommu_driver *iommu_driver;
extern struct kvm_iommu_ops *kvm_nvhe_sym(kvm_iommu_ops);
@@ -163,3 +205,129 @@ void kvm_iommu_guest_free_mc(struct kvm_hyp_memcache *mc)
else
free_hyp_memcache(mc);
}
/* Hypercall abstractions exposed to kernel IOMMU drivers */

/*
 * kvm_iommu_attach_dev() - Attach an endpoint to a hypervisor IOMMU domain.
 * @iommu_id: IOMMU instance the endpoint sits behind.
 * @domain_id: Domain to attach the endpoint to.
 * @endpoint: Endpoint (stream) ID.
 * @pasid: PASID to attach.
 * @ssid_bits: Number of substream-ID bits for the endpoint.
 * @flags: Attach flags, forwarded verbatim to the hypervisor driver.
 *
 * Uses kvm_call_hyp_nvhe_mc(), so the hypercall is automatically retried
 * after topping up hypervisor memory when it fails with a memory request.
 */
int kvm_iommu_attach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
unsigned int endpoint, unsigned int pasid,
unsigned int ssid_bits, unsigned long flags)
{
return kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_attach_dev, iommu_id, domain_id,
endpoint, pasid, ssid_bits, flags);
}
EXPORT_SYMBOL(kvm_iommu_attach_dev);
/*
 * kvm_iommu_detach_dev() - Detach an endpoint/PASID from a domain.
 *
 * Plain hypercall with no memcache retry, unlike the attach path —
 * presumably detach never needs hypervisor memory; confirm with the
 * hypervisor-side implementation.
 */
int kvm_iommu_detach_dev(pkvm_handle_t iommu_id, pkvm_handle_t domain_id,
unsigned int endpoint, unsigned int pasid)
{
return kvm_call_hyp_nvhe(__pkvm_host_iommu_detach_dev, iommu_id, domain_id,
endpoint, pasid);
}
EXPORT_SYMBOL(kvm_iommu_detach_dev);
/*
 * kvm_iommu_alloc_domain() - Allocate a hypervisor IOMMU domain.
 * @domain_id: Caller-chosen handle for the new domain.
 * @type: Domain type, interpreted by the hypervisor IOMMU driver.
 *
 * Retries with a memory topup if the hypervisor reports -ENOMEM.
 */
int kvm_iommu_alloc_domain(pkvm_handle_t domain_id, int type)
{
return kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_alloc_domain,
domain_id, type);
}
EXPORT_SYMBOL(kvm_iommu_alloc_domain);
/*
 * kvm_iommu_free_domain() - Free a hypervisor IOMMU domain previously
 * allocated with kvm_iommu_alloc_domain().
 */
int kvm_iommu_free_domain(pkvm_handle_t domain_id)
{
return kvm_call_hyp_nvhe(__pkvm_host_iommu_free_domain, domain_id);
}
EXPORT_SYMBOL(kvm_iommu_free_domain);
/*
 * kvm_iommu_map_pages() - Map a run of pages into an IOMMU domain.
 * @domain_id: Target domain.
 * @iova: Start IO virtual address.
 * @paddr: Start physical address.
 * @pgsize: Page size of each mapping.
 * @pgcount: Number of pages to map.
 * @prot: IOMMU protection flags.
 * @gfp: Allocation flags used when topping up hypervisor memory.
 * @total_mapped: Accumulates the number of bytes actually mapped.
 *
 * Loops the hypercall, advancing @iova/@paddr by the bytes mapped each
 * round and topping up hypervisor memory between attempts, until the
 * whole range is mapped or a topup fails.
 *
 * NOTE(review): @total_mapped is only ever incremented here, so the
 * caller is assumed to pass it in initialized to 0 — confirm at call
 * sites.
 *
 * Returns 0 if the full range was mapped, -EINVAL otherwise (partial
 * progress is still reported through @total_mapped).
 */
int kvm_iommu_map_pages(pkvm_handle_t domain_id, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *total_mapped)
{
size_t mapped;
size_t size = pgsize * pgcount;
struct arm_smccc_res res;

do {
res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_map_pages, domain_id,
iova, paddr, pgsize, pgcount, prot);
mapped = res.a1;
iova += mapped;
paddr += mapped;
/* The hypervisor must make progress in whole pages, within range. */
WARN_ON(mapped % pgsize);
WARN_ON(mapped > pgcount * pgsize);
pgcount -= mapped / pgsize;
*total_mapped += mapped;
} while (*total_mapped < size && !kvm_iommu_topup_memcache(&res, gfp));
if (*total_mapped < size)
return -EINVAL;

return 0;
}
EXPORT_SYMBOL(kvm_iommu_map_pages);
/*
 * kvm_iommu_unmap_pages() - Unmap a run of pages from an IOMMU domain.
 * @domain_id: Target domain.
 * @iova: Start IO virtual address.
 * @pgsize: Page size of each mapping.
 * @pgcount: Number of pages to unmap.
 *
 * Loops the hypercall until the whole range is unmapped, the hypervisor
 * stops making progress, or a GFP_ATOMIC memory topup fails.
 *
 * Returns the total number of bytes unmapped (may be less than asked).
 */
size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
size_t pgsize, size_t pgcount)
{
size_t unmapped;
size_t total_unmapped = 0;
size_t size = pgsize * pgcount;
struct arm_smccc_res res;

do {
res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_unmap_pages,
domain_id, iova, pgsize, pgcount);
unmapped = res.a1;
total_unmapped += unmapped;
iova += unmapped;
/* Progress must be in whole pages. */
WARN_ON(unmapped % pgsize);
pgcount -= unmapped / pgsize;

/*
 * The page table driver can unmap less than we asked for. If it
 * didn't unmap anything at all, then it either reached the end
 * of the range, or it needs a page in the memcache to break a
 * block mapping.
 */
} while (total_unmapped < size &&
(unmapped || !kvm_iommu_topup_memcache(&res, GFP_ATOMIC)));

return total_unmapped;
}
EXPORT_SYMBOL(kvm_iommu_unmap_pages);
/*
 * kvm_iommu_iova_to_phys() - Translate an IOVA to a physical address
 * through the hypervisor's view of the domain's page tables.
 */
phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova)
{
return kvm_call_hyp_nvhe(__pkvm_host_iommu_iova_to_phys, domain_id, iova);
}
EXPORT_SYMBOL(kvm_iommu_iova_to_phys);
/*
 * kvm_iommu_map_sg() - Map a scatter-gather list into an IOMMU domain.
 * @domain_id: Target domain.
 * @sg: Scatter-gather entries (each with phys/pgsize/pgcount).
 * @iova: Start IO virtual address; entries are mapped contiguously.
 * @nent: Number of entries in @sg.
 * @prot: IOMMU protection flags.
 * @gfp: Allocation flags used when topping up hypervisor memory.
 *
 * Repeats the hypercall, consuming fully- or partially-mapped entries
 * from @sg between rounds, until every entry is mapped.
 *
 * NOTE(review): the return value of kvm_iommu_topup_memcache() is
 * ignored; if the hypervisor maps nothing and the topup keeps failing,
 * this loop cannot terminate — confirm this is intended.
 *
 * Returns the total number of bytes mapped.
 */
size_t kvm_iommu_map_sg(pkvm_handle_t domain_id, struct kvm_iommu_sg *sg,
unsigned long iova, unsigned int nent,
unsigned int prot, gfp_t gfp)
{
size_t mapped, total_mapped = 0;
struct arm_smccc_res res;

do {
res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_map_sg,
domain_id, iova, sg, nent, prot);
mapped = res.a1;
iova += mapped;
total_mapped += mapped;
/* Advance @sg past everything the hypervisor mapped this round. */
while (mapped) {
if (mapped < (sg->pgsize * sg->pgcount)) {
/* Entry partially mapped: trim it and retry from here. */
sg->phys += mapped;
sg->pgcount -= mapped / sg->pgsize;
mapped = 0;
} else {
/* Entry fully mapped: drop it and move to the next. */
mapped -= sg->pgsize * sg->pgcount;
sg++;
nent--;
}
}
kvm_iommu_topup_memcache(&res, gfp);
} while (nent);

return total_mapped;
}
EXPORT_SYMBOL(kvm_iommu_map_sg);

View File

@@ -33,6 +33,7 @@ CONFIG_MEMCG=y
CONFIG_MEMCG_V1=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_UCLAMP_TASK_GROUP=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
@@ -119,6 +120,7 @@ CONFIG_MEMORY_HOTREMOVE=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
CONFIG_READ_ONLY_THP_FOR_FS=y
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
@@ -286,8 +288,6 @@ CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_HIDP=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT=y
CONFIG_BT_HCIBTSDIO=m
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_LL=y

View File

@@ -1,5 +1,5 @@
BRANCH=android16-6.12
KMI_GENERATION=3
KMI_GENERATION=4
CLANG_VERSION=r536225
RUSTC_VERSION=1.82.0
AARCH64_NDK_TRIPLE=aarch64-linux-android31

View File

@@ -378,6 +378,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_flush_work_wait_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_flush_work_wait_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_fuse_request_send);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_fuse_request_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_lruvec_add_folio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_lruvec_del_folio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tcp_sock_error);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tcp_fastsyn);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tcp_state_change);

View File

@@ -1835,6 +1835,7 @@ MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default:
MODULE_DESCRIPTION("Loopback device support");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)

View File

@@ -56,18 +56,6 @@ config BT_HCIBTUSB_POLL_SYNC
Say Y here to enable USB poll_sync for Bluetooth USB devices by
default.
config BT_HCIBTUSB_AUTO_ISOC_ALT
bool "Automatically adjust alternate setting for Isoc endpoints"
depends on BT_HCIBTUSB
default y if CHROME_PLATFORMS
help
Say Y here to automatically adjusting the alternate setting for
HCI_USER_CHANNEL whenever a SCO link is established.
When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets
and configures isoc endpoint alternate setting automatically when
HCI_USER_CHANNEL is in use.
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB

View File

@@ -21,6 +21,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_drv.h>
#include "btintel.h"
#include "btbcm.h"
@@ -34,7 +35,6 @@ static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC);
static bool reset = true;
static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT);
static struct usb_driver btusb_driver;
@@ -1073,42 +1073,6 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}
static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
struct hci_ev_sync_conn_complete *ev =
(void *) skb->data + sizeof(*hdr);
struct hci_dev *hdev = data->hdev;
unsigned int notify_air_mode;
if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
return;
if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
return;
if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status)
return;
switch (ev->air_mode) {
case BT_CODEC_CVSD:
notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD;
break;
case BT_CODEC_TRANSPARENT:
notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP;
break;
default:
return;
}
bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode);
data->sco_num = 1;
data->air_mode = notify_air_mode;
schedule_work(&data->work);
}
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
@@ -1116,10 +1080,6 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
schedule_delayed_work(&data->rx_work, 0);
}
/* Configure altsetting for HCI_USER_CHANNEL on SCO connected */
if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL))
btusb_sco_connected(data, skb);
return data->recv_event(data->hdev, skb);
}
@@ -3679,6 +3639,134 @@ static const struct file_operations force_poll_sync_fops = {
.llseek = default_llseek,
};
/* Driver-specific HCI Drv command: list the isoc altsettings the device has. */
#define BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS \
hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0000)
#define BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE 0

/* Response: @num altsetting indices in @altsettings. */
struct btusb_hci_drv_rp_supported_altsettings {
__u8 num;
__u8 altsettings[];
} __packed;

/* Driver-specific HCI Drv command: switch the isoc interface altsetting. */
#define BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING \
hci_opcode_pack(HCI_DRV_OGF_DRIVER_SPECIFIC, 0x0001)
#define BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE 1

/* Command payload: the altsetting to switch to (0-6). */
struct btusb_hci_drv_cmd_switch_altsetting {
__u8 altsetting;
} __packed;

/* Commands advertised by the Read Info response, with log-friendly names. */
static const struct {
u16 opcode;
const char *desc;
} btusb_hci_drv_supported_commands[] = {
/* Common commands */
{ HCI_DRV_OP_READ_INFO, "Read Info" },

/* Driver specific commands */
{ BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS, "Supported Altsettings" },
{ BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING, "Switch Altsetting" },
};
/*
 * btusb_hci_drv_read_info() - Handle the common HCI Drv "Read Info" command.
 * @hdev: HCI device the command was issued against.
 * @data: Command payload (unused; the command carries no parameters).
 * @data_len: Payload length (unused).
 *
 * Completes the command with the driver name and the list of supported
 * HCI Drv opcodes from btusb_hci_drv_supported_commands[].
 *
 * Returns 0 on success or a negative errno.
 */
static int btusb_hci_drv_read_info(struct hci_dev *hdev, void *data,
u16 data_len)
{
struct hci_drv_rp_read_info *rp;
size_t rp_size;
int err, i;
u16 opcode, num_supported_commands =
ARRAY_SIZE(btusb_hci_drv_supported_commands);

/* The response carries one little-endian u16 opcode per command. */
rp_size = sizeof(*rp) + num_supported_commands * 2;

rp = kmalloc(rp_size, GFP_KERNEL);
if (!rp)
return -ENOMEM;

strscpy_pad(rp->driver_name, btusb_driver.name);

rp->num_supported_commands = cpu_to_le16(num_supported_commands);
for (i = 0; i < num_supported_commands; i++) {
opcode = btusb_hci_drv_supported_commands[i].opcode;
bt_dev_info(hdev,
"Supported HCI Drv command (0x%02x|0x%04x): %s",
hci_opcode_ogf(opcode),
hci_opcode_ocf(opcode),
btusb_hci_drv_supported_commands[i].desc);
rp->supported_commands[i] = cpu_to_le16(opcode);
}

err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
HCI_DRV_STATUS_SUCCESS, rp, rp_size);

kfree(rp);
return err;
}
/*
 * btusb_hci_drv_supported_altsettings() - Handle the "Supported
 * Altsettings" driver-specific HCI Drv command.
 * @hdev: HCI device the command was issued against.
 * @data: Command payload (unused; the command carries no parameters).
 * @data_len: Payload length (unused).
 *
 * Probes which isochronous interface altsettings (0-6) the device
 * exposes and completes the command with the resulting list. An empty
 * list is returned when the device has no isoc interface.
 *
 * Returns 0 on success or a negative errno.
 */
static int btusb_hci_drv_supported_altsettings(struct hci_dev *hdev, void *data,
					       u16 data_len)
{
	struct btusb_data *drvdata = hci_get_drvdata(hdev);
	struct btusb_hci_drv_rp_supported_altsettings *rp;
	size_t rp_size;
	int err;
	u8 i;

	/* There are at most 7 altsettings (0 - 6) */
	rp = kmalloc(sizeof(*rp) + 7, GFP_KERNEL);
	if (!rp)	/* fix: original dereferenced rp without a NULL check */
		return -ENOMEM;

	rp->num = 0;
	if (!drvdata->isoc)
		goto done;

	for (i = 0; i <= 6; i++) {
		if (btusb_find_altsetting(drvdata, i))
			rp->altsettings[rp->num++] = i;
	}

done:
	rp_size = sizeof(*rp) + rp->num;

	err = hci_drv_cmd_complete(hdev, BTUSB_HCI_DRV_OP_SUPPORTED_ALTSETTINGS,
				   HCI_DRV_STATUS_SUCCESS, rp, rp_size);
	kfree(rp);
	return err;
}
/*
 * btusb_hci_drv_switch_altsetting() - Handle the "Switch Altsetting"
 * driver-specific HCI Drv command.
 * @hdev: HCI device the command was issued against.
 * @data: Command payload; a struct btusb_hci_drv_cmd_switch_altsetting.
 * @data_len: Payload length — assumed already validated against
 *            BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE by the dispatcher
 *            before this handler runs; TODO confirm in hci_drv core.
 *
 * Rejects altsettings above 6 and otherwise asks the driver to switch
 * the isoc interface, reporting the outcome as a command status.
 */
static int btusb_hci_drv_switch_altsetting(struct hci_dev *hdev, void *data,
u16 data_len)
{
struct btusb_hci_drv_cmd_switch_altsetting *cmd = data;
u8 status;

if (cmd->altsetting > 6) {
status = HCI_DRV_STATUS_INVALID_PARAMETERS;
} else {
if (btusb_switch_alt_setting(hdev, cmd->altsetting))
status = HCI_DRV_STATUS_UNSPECIFIED_ERROR;
else
status = HCI_DRV_STATUS_SUCCESS;
}

return hci_drv_cmd_status(hdev, BTUSB_HCI_DRV_OP_SWITCH_ALTSETTING,
status);
}
/* Handlers for the common (driver-agnostic) HCI Drv commands. */
static const struct hci_drv_handler btusb_hci_drv_common_handlers[] = {
{ btusb_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
};

/* Handlers for the btusb-specific HCI Drv commands. */
static const struct hci_drv_handler btusb_hci_drv_specific_handlers[] = {
{ btusb_hci_drv_supported_altsettings,
BTUSB_HCI_DRV_SUPPORTED_ALTSETTINGS_SIZE },
{ btusb_hci_drv_switch_altsetting,
BTUSB_HCI_DRV_SWITCH_ALTSETTING_SIZE },
};

/* HCI Driver protocol description installed on hdev->hci_drv at probe. */
static struct hci_drv btusb_hci_drv = {
.common_handler_count = ARRAY_SIZE(btusb_hci_drv_common_handlers),
.common_handlers = btusb_hci_drv_common_handlers,
.specific_handler_count = ARRAY_SIZE(btusb_hci_drv_specific_handlers),
.specific_handlers = btusb_hci_drv_specific_handlers,
};
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3818,12 +3906,13 @@ static int btusb_probe(struct usb_interface *intf,
data->reset_gpio = reset_gpio;
}
hdev->open = btusb_open;
hdev->close = btusb_close;
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
hdev->wakeup = btusb_wakeup;
hdev->open = btusb_open;
hdev->close = btusb_close;
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
hdev->wakeup = btusb_wakeup;
hdev->hci_drv = &btusb_hci_drv;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);

View File

@@ -211,6 +211,18 @@ config DRM_DEBUG_MODESET_LOCK
If in doubt, say "N".
config DRM_CLIENT_SELECTION
bool
depends on DRM
select DRM_CLIENT_SETUP if DRM_FBDEV_EMULATION
help
Drivers that support in-kernel DRM clients have to select this
option.
config DRM_CLIENT_SETUP
bool
depends on DRM_CLIENT_SELECTION
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM

View File

@@ -144,8 +144,12 @@ drm_kms_helper-y := \
drm_rect.o \
drm_self_refresh_helper.o \
drm_simple_kms_helper.o
drm_kms_helper-$(CONFIG_DRM_CLIENT_SETUP) += \
drm_client_setup.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \
drm_fbdev_client.o \
drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
#

View File

@@ -0,0 +1,66 @@
// SPDX-License-Identifier: MIT
#include <drm/drm_client_setup.h>
#include <drm/drm_device.h>
#include <drm/drm_fbdev_client.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
/**
* drm_client_setup() - Setup in-kernel DRM clients
* @dev: DRM device
* @format: Preferred pixel format for the device. Use NULL, unless
* there is clearly a driver-preferred format.
*
* This function sets up the in-kernel DRM clients. Restore, hotplug
* events and teardown are all taken care of.
*
* Drivers should call drm_client_setup() after registering the new
* DRM device with drm_dev_register(). This function is safe to call
* even when there are no connectors present. Setup will be retried
* on the next hotplug event.
*
* The clients are destroyed by drm_dev_unregister().
*/
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
int ret;

ret = drm_fbdev_client_setup(dev, format);
if (ret)
/* Non-fatal: the device remains usable without in-kernel clients. */
drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
}
EXPORT_SYMBOL(drm_client_setup);
/**
 * drm_client_setup_with_fourcc() - Setup in-kernel DRM clients for color mode
 * @dev: DRM device
 * @fourcc: Preferred pixel format as 4CC code for the device
 *
 * This function sets up the in-kernel DRM clients. It is equivalent
 * to drm_client_setup(), but expects a 4CC code as second argument.
 */
void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc)
{
/* Resolve the 4CC to its format descriptor and delegate. */
drm_client_setup(dev, drm_format_info(fourcc));
}
EXPORT_SYMBOL(drm_client_setup_with_fourcc);
/**
 * drm_client_setup_with_color_mode() - Setup in-kernel DRM clients for color mode
 * @dev: DRM device
 * @color_mode: Preferred color mode for the device
 *
 * This function sets up the in-kernel DRM clients. It is equivalent
 * to drm_client_setup(), but expects a color mode as second argument.
 *
 * Do not use this function in new drivers. Prefer drm_client_setup() with a
 * format of NULL.
 */
void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode)
{
/* Translate the legacy color mode into a 4CC, then delegate. */
u32 fourcc = drm_driver_color_mode_format(dev, color_mode);

drm_client_setup_with_fourcc(dev, fourcc);
}
EXPORT_SYMBOL(drm_client_setup_with_color_mode);

View File

@@ -492,8 +492,8 @@ EXPORT_SYMBOL(drm_fb_helper_init);
* @fb_helper: driver-allocated fbdev helper
*
* A helper to alloc fb_info and the member cmap. Called by the driver
* within the fb_probe fb_helper callback function. Drivers do not
* need to release the allocated fb_info structure themselves, this is
* within the struct &drm_driver.fbdev_probe callback function. Drivers do
* not need to release the allocated fb_info structure themselves, this is
* automatically done when calling drm_fb_helper_fini().
*
* RETURNS:
@@ -1610,7 +1610,7 @@ static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
/*
* Allocates the backing storage and sets up the fbdev info structure through
* the ->fb_probe callback.
* the ->fbdev_probe callback.
*/
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
{
@@ -1628,7 +1628,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
}
/* push down into drivers */
ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
if (dev->driver->fbdev_probe)
ret = dev->driver->fbdev_probe(fb_helper, &sizes);
else if (fb_helper->funcs)
ret = fb_helper->funcs->fb_probe(fb_helper, &sizes);
if (ret < 0)
return ret;
@@ -1700,7 +1703,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
* instance and the drm framebuffer allocated in &drm_fb_helper.fb.
*
* Drivers should call this (or their equivalent setup code) from their
* &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
* &drm_driver.fbdev_probe callback after having allocated the fbdev
* backing storage framebuffer.
*/
void drm_fb_helper_fill_info(struct fb_info *info,
@@ -1856,7 +1859,7 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
* Note that this also registers the fbdev and so allows userspace to call into
* the driver through the fbdev interfaces.
*
* This function will call down into the &drm_fb_helper_funcs.fb_probe callback
* This function will call down into the &drm_driver.fbdev_probe callback
* to let the driver allocate and initialize the fbdev info structure and the
* drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided
* as a helper to setup simple default values for the fbdev info structure.

View File

@@ -0,0 +1,141 @@
// SPDX-License-Identifier: MIT
#include <drm/drm_client.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_client.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
/*
* struct drm_client_funcs
*/
/*
 * Unregister callback: tear down the fbdev client. If the fbdev was fully
 * set up (fb_info exists), go through the fb_helper unregister path;
 * otherwise only the client and helper allocated in
 * drm_fbdev_client_setup() need to be released here.
 */
static void drm_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

if (fb_helper->info) {
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
}
/* Restore callback: bring the fbdev configuration back on lastclose. */
static int drm_fbdev_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);

return 0;
}
/*
 * Hotplug callback: the first event performs the full fbdev setup
 * (helper init + initial config); later events just propagate the
 * hotplug to the already-initialized helper.
 */
static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
int ret;

if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);

ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err_drm_err;

/* Legacy (non-atomic) drivers need unused functions disabled first. */
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);

ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
goto err_drm_fb_helper_fini;

return 0;

err_drm_fb_helper_fini:
drm_fb_helper_fini(fb_helper);
err_drm_err:
drm_err(dev, "fbdev: Failed to setup emulation (ret=%d)\n", ret);
return ret;
}
/* DRM client callbacks implementing the fbdev emulation client. */
static const struct drm_client_funcs drm_fbdev_client_funcs = {
.owner = THIS_MODULE,
.unregister = drm_fbdev_client_unregister,
.restore = drm_fbdev_client_restore,
.hotplug = drm_fbdev_client_hotplug,
};
/**
* drm_fbdev_client_setup() - Setup fbdev emulation
* @dev: DRM device
* @format: Preferred color format for the device. DRM_FORMAT_XRGB8888
* is used if this is zero.
*
* This function sets up fbdev emulation. Restore, hotplug events and
* teardown are all taken care of. Drivers that do suspend/resume need
* to call drm_fb_helper_set_suspend_unlocked() themselves. Simple
* drivers might use drm_mode_config_helper_suspend().
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.
*
* The fbdev client is destroyed by drm_dev_unregister().
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
struct drm_fb_helper *fb_helper;
unsigned int color_mode;
int ret;

/* TODO: Use format info throughout DRM */
if (format) {
unsigned int bpp = drm_format_info_bpp(format, 0);

switch (bpp) {
case 16:
/* 16-bpp formats are ambiguous; depth distinguishes them. */
color_mode = format->depth; // could also be 15
break;
default:
/* For everything else bpp doubles as the color mode. */
color_mode = bpp;
}
} else {
/* No preferred format: derive a color mode from the device. */
switch (dev->mode_config.preferred_depth) {
case 0:
case 24:
/* Unset or 24-bit depth both map to 32-bit color. */
color_mode = 32;
break;
default:
color_mode = dev->mode_config.preferred_depth;
}
}

/* Setup must happen after registration and only once per device. */
drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");

fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
if (!fb_helper)
return -ENOMEM;
drm_fb_helper_prepare(dev, fb_helper, color_mode, NULL);

ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
drm_err(dev, "Failed to register client: %d\n", ret);
goto err_drm_client_init;
}

/* From here on teardown happens via drm_fbdev_client_unregister(). */
drm_client_register(&fb_helper->client);

return 0;

err_drm_client_init:
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_client_setup);

View File

@@ -71,71 +71,7 @@ static const struct fb_ops drm_fbdev_ttm_fb_ops = {
static int drm_fbdev_ttm_helper_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer;
struct fb_info *info;
size_t screen_size;
void *screen_buffer;
u32 format;
int ret;
drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
sizes->surface_bpp);
format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
sizes->surface_depth);
buffer = drm_client_framebuffer_create(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
fb_helper->buffer = buffer;
fb_helper->fb = buffer->fb;
screen_size = buffer->gem->size;
screen_buffer = vzalloc(screen_size);
if (!screen_buffer) {
ret = -ENOMEM;
goto err_drm_client_framebuffer_delete;
}
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_vfree;
}
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_ttm_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
info->screen_buffer = screen_buffer;
info->fix.smem_len = screen_size;
/* deferred I/O */
fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_drm_fb_helper_release_info;
return 0;
err_drm_fb_helper_release_info:
drm_fb_helper_release_info(fb_helper);
err_vfree:
vfree(screen_buffer);
err_drm_client_framebuffer_delete:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
drm_client_framebuffer_delete(buffer);
return ret;
return drm_fbdev_ttm_driver_fbdev_probe(fb_helper, sizes);
}
static void drm_fbdev_ttm_damage_blit_real(struct drm_fb_helper *fb_helper,
@@ -240,6 +176,82 @@ static const struct drm_fb_helper_funcs drm_fbdev_ttm_helper_funcs = {
.fb_dirty = drm_fbdev_ttm_helper_fb_dirty,
};
/*
* struct drm_driver
*/
int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer;
struct fb_info *info;
size_t screen_size;
void *screen_buffer;
u32 format;
int ret;
drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
sizes->surface_bpp);
format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
sizes->surface_depth);
buffer = drm_client_framebuffer_create(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
fb_helper->funcs = &drm_fbdev_ttm_helper_funcs;
fb_helper->buffer = buffer;
fb_helper->fb = buffer->fb;
screen_size = buffer->gem->size;
screen_buffer = vzalloc(screen_size);
if (!screen_buffer) {
ret = -ENOMEM;
goto err_drm_client_framebuffer_delete;
}
info = drm_fb_helper_alloc_info(fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_vfree;
}
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_ttm_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
info->screen_buffer = screen_buffer;
info->fix.smem_len = screen_size;
/* deferred I/O */
fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
goto err_drm_fb_helper_release_info;
return 0;
err_drm_fb_helper_release_info:
drm_fb_helper_release_info(fb_helper);
err_vfree:
vfree(screen_buffer);
err_drm_client_framebuffer_delete:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
drm_client_framebuffer_delete(buffer);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe);
static void drm_fbdev_ttm_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

View File

@@ -15,6 +15,12 @@
#include "drm_crtc_internal.h"
#include "drm_internal.h"
/* ANDROID:
* this is needed to get access to dentry_open, which the drm layer does
* need to do.
*/
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
/**
* DOC: drm leasing
*

View File

@@ -4,6 +4,8 @@ config DRM_NOUVEAU
depends on DRM && PCI && MMU
select IOMMU_API
select FW_LOADER
select FW_CACHE if PM_SLEEP
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER

View File

@@ -31,6 +31,7 @@
#include <linux/dynamic_debug.h>
#include <drm/drm_aperture.h>
#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -836,6 +837,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
{
struct nvkm_device *device;
struct nouveau_drm *drm;
const struct drm_format_info *format;
int ret;
if (vga_switcheroo_client_probe_defer(pdev))
@@ -873,9 +875,11 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
goto fail_pci;
if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
drm_fbdev_ttm_setup(drm->dev, 8);
format = drm_format_info(DRM_FORMAT_C8);
else
drm_fbdev_ttm_setup(drm->dev, 32);
format = NULL;
drm_client_setup(drm->dev, format);
quirk_broken_nv_runpm(pdev);
return 0;
@@ -1318,6 +1322,8 @@ driver_stub = {
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
DRM_FBDEV_TTM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
#ifdef GIT_REVISION

View File

@@ -544,6 +544,8 @@ static struct class ib_class = {
static void rdma_init_coredev(struct ib_core_device *coredev,
struct ib_device *dev, struct net *net)
{
bool is_full_dev = &dev->coredev == coredev;
/* This BUILD_BUG_ON is intended to catch layout change
* of union of ib_core_device and device.
* dev must be the first element as ib_core and providers
@@ -555,6 +557,13 @@ static void rdma_init_coredev(struct ib_core_device *coredev,
coredev->dev.class = &ib_class;
coredev->dev.groups = dev->groups;
/*
* Don't expose hw counters outside of the init namespace.
*/
if (!is_full_dev && dev->hw_stats_attr_index)
coredev->dev.groups[dev->hw_stats_attr_index] = NULL;
device_initialize(&coredev->dev);
coredev->owner = dev;
INIT_LIST_HEAD(&coredev->port_list);

View File

@@ -988,6 +988,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++)
if (!ibdev->groups[i]) {
ibdev->groups[i] = &data->group;
ibdev->hw_stats_attr_index = i;
return 0;
}
WARN(true, "struct ib_device->groups is too small");

View File

@@ -7,7 +7,6 @@
#include <asm/kvm_pkvm.h>
#include <asm/kvm_mmu.h>
#include <linux/arm-smccc.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -70,45 +69,6 @@ static DEFINE_IDA(kvm_arm_smmu_domain_ida);
static int atomic_pages;
module_param(atomic_pages, int, 0);
static int kvm_arm_smmu_topup_memcache(struct arm_smccc_res *res, gfp_t gfp)
{
struct kvm_hyp_req req;
hyp_reqs_smccc_decode(res, &req);
if ((res->a1 == -ENOMEM) && (req.type != KVM_HYP_REQ_TYPE_MEM)) {
/*
* There is no way for drivers to populate hyp_alloc requests,
* so -ENOMEM + no request indicates that.
*/
return __pkvm_topup_hyp_alloc(1);
} else if (req.type != KVM_HYP_REQ_TYPE_MEM) {
return -EBADE;
}
if (req.mem.dest == REQ_MEM_DEST_HYP_IOMMU) {
return __pkvm_topup_hyp_alloc_mgt_gfp(HYP_ALLOC_MGT_IOMMU_ID,
req.mem.nr_pages,
req.mem.sz_alloc,
gfp);
} else if (req.mem.dest == REQ_MEM_DEST_HYP_ALLOC) {
/* Fill hyp alloc*/
return __pkvm_topup_hyp_alloc(req.mem.nr_pages);
}
pr_err("Bogus mem request");
return -EBADE;
}
#define kvm_call_hyp_nvhe_mc(...) \
({ \
struct arm_smccc_res __res; \
do { \
__res = kvm_call_hyp_nvhe_smccc(__VA_ARGS__); \
} while (__res.a1 && !kvm_arm_smmu_topup_memcache(&__res, GFP_KERNEL));\
__res.a1; \
})
static struct platform_driver kvm_arm_smmu_driver;
static struct arm_smmu_device *
@@ -258,8 +218,7 @@ static int kvm_arm_smmu_domain_finalize(struct kvm_arm_smmu_domain *kvm_smmu_dom
kvm_smmu_domain->id = ret;
ret = kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_alloc_domain,
kvm_smmu_domain->id, type);
ret = kvm_iommu_alloc_domain(kvm_smmu_domain->id, type);
if (ret) {
ida_free(&kvm_arm_smmu_domain_ida, kvm_smmu_domain->id);
return ret;
@@ -275,7 +234,7 @@ static void kvm_arm_smmu_domain_free(struct iommu_domain *domain)
struct arm_smmu_device *smmu = kvm_smmu_domain->smmu;
if (smmu && (kvm_smmu_domain->domain.type != IOMMU_DOMAIN_IDENTITY)) {
ret = kvm_call_hyp_nvhe(__pkvm_host_iommu_free_domain, kvm_smmu_domain->id);
ret = kvm_iommu_free_domain(kvm_smmu_domain->id);
ida_free(&kvm_arm_smmu_domain_ida, kvm_smmu_domain->id);
}
kfree(kvm_smmu_domain);
@@ -296,8 +255,7 @@ static int kvm_arm_smmu_detach_dev_pasid(struct host_arm_smmu_device *host_smmu,
for (i = 0; i < fwspec->num_ids; i++) {
int sid = fwspec->ids[i];
ret = kvm_call_hyp_nvhe(__pkvm_host_iommu_detach_dev,
host_smmu->id, domain->id, sid, pasid);
ret = kvm_iommu_detach_dev(host_smmu->id, domain->id, sid, pasid);
if (ret) {
dev_err(smmu->dev, "cannot detach device %s (0x%x): %d\n",
dev_name(master->dev), sid, ret);
@@ -365,9 +323,8 @@ static int kvm_arm_smmu_set_dev_pasid(struct iommu_domain *domain,
for (i = 0; i < fwspec->num_ids; i++) {
int sid = fwspec->ids[i];
ret = kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_attach_dev,
host_smmu->id, kvm_smmu_domain->id,
sid, pasid, master->ssid_bits);
ret = kvm_iommu_attach_dev(host_smmu->id, kvm_smmu_domain->id,
sid, pasid, master->ssid_bits, 0);
if (ret) {
dev_err(smmu->dev, "cannot attach device %s (0x%x): %d\n",
dev_name(dev), sid, ret);
@@ -422,27 +379,10 @@ static int kvm_arm_smmu_map_pages(struct iommu_domain *domain,
size_t pgsize, size_t pgcount, int prot,
gfp_t gfp, size_t *total_mapped)
{
size_t mapped;
size_t size = pgsize * pgcount;
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
struct arm_smccc_res res;
do {
res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_map_pages,
kvm_smmu_domain->id,
iova, paddr, pgsize, pgcount, prot);
mapped = res.a1;
iova += mapped;
paddr += mapped;
WARN_ON(mapped % pgsize);
WARN_ON(mapped > pgcount * pgsize);
pgcount -= mapped / pgsize;
*total_mapped += mapped;
} while (*total_mapped < size && !kvm_arm_smmu_topup_memcache(&res, gfp));
if (*total_mapped < size)
return -EINVAL;
return 0;
return kvm_iommu_map_pages(kvm_smmu_domain->id, iova, paddr, pgsize,
pgcount, prot, gfp, total_mapped);
}
static size_t kvm_arm_smmu_unmap_pages(struct iommu_domain *domain,
@@ -450,32 +390,9 @@ static size_t kvm_arm_smmu_unmap_pages(struct iommu_domain *domain,
size_t pgcount,
struct iommu_iotlb_gather *iotlb_gather)
{
size_t unmapped;
size_t total_unmapped = 0;
size_t size = pgsize * pgcount;
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
struct arm_smccc_res res;
do {
res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_unmap_pages,
kvm_smmu_domain->id,
iova, pgsize, pgcount);
unmapped = res.a1;
total_unmapped += unmapped;
iova += unmapped;
WARN_ON(unmapped % pgsize);
pgcount -= unmapped / pgsize;
/*
* The page table driver can unmap less than we asked for. If it
* didn't unmap anything at all, then it either reached the end
* of the range, or it needs a page in the memcache to break a
* block mapping.
*/
} while (total_unmapped < size &&
(unmapped || !kvm_arm_smmu_topup_memcache(&res, GFP_ATOMIC)));
return total_unmapped;
return kvm_iommu_unmap_pages(kvm_smmu_domain->id, iova, pgsize, pgcount);
}
static phys_addr_t kvm_arm_smmu_iova_to_phys(struct iommu_domain *domain,
@@ -483,7 +400,7 @@ static phys_addr_t kvm_arm_smmu_iova_to_phys(struct iommu_domain *domain,
{
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
return kvm_call_hyp_nvhe(__pkvm_host_iommu_iova_to_phys, kvm_smmu_domain->id, iova);
return kvm_iommu_iova_to_phys(kvm_smmu_domain->id, iova);
}
struct kvm_arm_smmu_map_sg {
@@ -543,32 +460,11 @@ static size_t kvm_arm_smmu_consume_deferred_map_sg(struct iommu_map_cookie_sg *c
struct kvm_arm_smmu_map_sg *map_sg = container_of(cookie, struct kvm_arm_smmu_map_sg,
cookie);
struct kvm_iommu_sg *sg = map_sg->sg;
size_t mapped, total_mapped = 0;
struct arm_smccc_res res;
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(map_sg->cookie.domain);
size_t total_mapped;
do {
res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_map_sg,
kvm_smmu_domain->id,
map_sg->iova, sg, map_sg->ptr, map_sg->prot);
mapped = res.a1;
map_sg->iova += mapped;
total_mapped += mapped;
/* Skip mapped */
while (mapped) {
if (mapped < (sg->pgsize * sg->pgcount)) {
sg->phys += mapped;
sg->pgcount -= mapped / sg->pgsize;
mapped = 0;
} else {
mapped -= sg->pgsize * sg->pgcount;
sg++;
map_sg->ptr--;
}
}
kvm_arm_smmu_topup_memcache(&res, map_sg->gfp);
} while (map_sg->ptr);
total_mapped = kvm_iommu_map_sg(kvm_smmu_domain->id, sg, map_sg->iova, map_sg->ptr,
map_sg->prot, map_sg->gfp);
kvm_iommu_unshare_hyp_sg(sg, map_sg->nents);
kvm_iommu_sg_free(sg, map_sg->nents);
@@ -1178,9 +1074,8 @@ static int kvm_arm_smmu_v3_init_drv(void)
/* Preemptively allocate the identity domain. */
if (atomic_pages) {
ret = kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_alloc_domain,
KVM_IOMMU_DOMAIN_IDMAP_ID,
KVM_IOMMU_DOMAIN_IDMAP_TYPE);
ret = kvm_iommu_alloc_domain(KVM_IOMMU_DOMAIN_IDMAP_ID,
KVM_IOMMU_DOMAIN_IDMAP_TYPE);
if (ret)
return ret;
}

View File

@@ -1246,7 +1246,7 @@ static int smmu_fix_up_domains(struct hyp_arm_smmu_v3_device *smmu,
}
static int smmu_attach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_domain *domain,
u32 sid, u32 pasid, u32 pasid_bits)
u32 sid, u32 pasid, u32 pasid_bits, unsigned long flags)
{
int i;
int ret;

View File

@@ -1330,7 +1330,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
mutex_lock(&ucsi->ppm_lock);
ret = ucsi->ops->read_cci(ucsi, &cci);
ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
@@ -1348,7 +1348,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
ret = ucsi->ops->read_cci(ucsi, &cci);
ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
if (cci & UCSI_CCI_COMMAND_COMPLETE)
@@ -1377,7 +1377,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
/* Give the PPM time to process a reset before reading CCI */
msleep(20);
ret = ucsi->ops->read_cci(ucsi, &cci);
ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret)
goto out;
@@ -1913,8 +1913,8 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
struct ucsi *ucsi;
if (!ops ||
!ops->read_version || !ops->read_cci || !ops->read_message_in ||
!ops->sync_control || !ops->async_control)
!ops->read_version || !ops->read_cci || !ops->poll_cci ||
!ops->read_message_in || !ops->sync_control || !ops->async_control)
return ERR_PTR(-EINVAL);
ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);

View File

@@ -60,6 +60,7 @@ struct dentry;
* struct ucsi_operations - UCSI I/O operations
* @read_version: Read implemented UCSI version
* @read_cci: Read CCI register
* @poll_cci: Read CCI register while polling with notifications disabled
* @read_message_in: Read message data from UCSI
* @sync_control: Blocking control operation
* @async_control: Non-blocking control operation
@@ -74,6 +75,7 @@ struct dentry;
struct ucsi_operations {
int (*read_version)(struct ucsi *ucsi, u16 *version);
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
int (*sync_control)(struct ucsi *ucsi, u64 command);
int (*async_control)(struct ucsi *ucsi, u64 command);

View File

@@ -59,19 +59,24 @@ static int ucsi_acpi_read_version(struct ucsi *ucsi, u16 *version)
static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
int ret;
if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
if (ret)
return ret;
}
memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
return 0;
}
static int ucsi_acpi_poll_cci(struct ucsi *ucsi, u32 *cci)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
int ret;
ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
if (ret)
return ret;
return ucsi_acpi_read_cci(ucsi, cci);
}
static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
@@ -94,6 +99,7 @@ static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_acpi_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
.poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_acpi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_acpi_async_control
@@ -145,6 +151,7 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_gram_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
.poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_gram_read_message_in,
.sync_control = ucsi_gram_sync_control,
.async_control = ucsi_acpi_async_control

View File

@@ -664,6 +664,7 @@ err_put:
static const struct ucsi_operations ucsi_ccg_ops = {
.read_version = ucsi_ccg_read_version,
.read_cci = ucsi_ccg_read_cci,
.poll_cci = ucsi_ccg_read_cci,
.read_message_in = ucsi_ccg_read_message_in,
.sync_control = ucsi_ccg_sync_control,
.async_control = ucsi_ccg_async_control,

View File

@@ -201,6 +201,7 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
static const struct ucsi_operations pmic_glink_ucsi_ops = {
.read_version = pmic_glink_ucsi_read_version,
.read_cci = pmic_glink_ucsi_read_cci,
.poll_cci = pmic_glink_ucsi_read_cci,
.read_message_in = pmic_glink_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = pmic_glink_ucsi_async_control,

View File

@@ -424,6 +424,7 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
static const struct ucsi_operations ucsi_stm32g0_ops = {
.read_version = ucsi_stm32g0_read_version,
.read_cci = ucsi_stm32g0_read_cci,
.poll_cci = ucsi_stm32g0_read_cci,
.read_message_in = ucsi_stm32g0_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_stm32g0_async_control,

View File

@@ -74,6 +74,7 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command)
const struct ucsi_operations yoga_c630_ucsi_ops = {
.read_version = yoga_c630_ucsi_read_version,
.read_cci = yoga_c630_ucsi_read_cci,
.poll_cci = yoga_c630_ucsi_read_cci,
.read_message_in = yoga_c630_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = yoga_c630_ucsi_async_control,

View File

@@ -277,6 +277,8 @@ static int gunyah_vcpu_run(struct gunyah_vcpu *vcpu)
schedule();
break;
case GUNYAH_VCPU_STATE_POWERED_OFF:
fallthrough;
case GUNYAH_VCPU_STATE_SYSTEM_OFF:
/**
* vcpu might be off because the VM is shut down
* If so, it won't ever run again
@@ -326,8 +328,8 @@ static int gunyah_vcpu_run(struct gunyah_vcpu *vcpu)
pr_warn_ratelimited(
"Unknown vCPU state: %llx\n",
vcpu_run_resp.sized_state);
schedule();
break;
ret = -EINVAL;
goto out;
}
} else if (gunyah_error == GUNYAH_ERROR_RETRY) {
schedule();

View File

@@ -548,7 +548,7 @@ void d_drop(struct dentry *dentry)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
EXPORT_SYMBOL_NS(d_drop, ANDROID_GKI_VFS_EXPORT_ONLY);
static inline void dentry_unlist(struct dentry *dentry)
{
@@ -930,7 +930,7 @@ repeat:
spin_unlock(&ret->d_lock);
return ret;
}
EXPORT_SYMBOL(dget_parent);
EXPORT_SYMBOL_NS(dget_parent, ANDROID_GKI_VFS_EXPORT_ONLY);
static struct dentry * __d_find_any_alias(struct inode *inode)
{
@@ -1812,7 +1812,7 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);
EXPORT_SYMBOL_NS(d_set_d_op, ANDROID_GKI_VFS_EXPORT_ONLY);
static unsigned d_flags_for_inode(struct inode *inode)
{
@@ -2433,7 +2433,7 @@ void d_rehash(struct dentry * entry)
__d_rehash(entry);
spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL_NS(d_rehash, ANDROID_GKI_VFS_EXPORT_ONLY);
static inline unsigned start_dir_add(struct inode *dir)
{

View File

@@ -1642,7 +1642,7 @@ free_lock:
locks_free_lease(new_fl);
return error;
}
EXPORT_SYMBOL(__break_lease);
EXPORT_SYMBOL_NS(__break_lease, ANDROID_GKI_VFS_EXPORT_ONLY);
/**
* lease_get_mtime - update modified time of an inode with exclusive lease

View File

@@ -555,7 +555,7 @@ void path_get(const struct path *path)
mntget(path->mnt);
dget(path->dentry);
}
EXPORT_SYMBOL(path_get);
EXPORT_SYMBOL_NS(path_get, ANDROID_GKI_VFS_EXPORT_ONLY);
/**
* path_put - put a reference to a path
@@ -2762,7 +2762,7 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
putname(filename);
return ret;
}
EXPORT_SYMBOL(vfs_path_lookup);
EXPORT_SYMBOL_NS(vfs_path_lookup, ANDROID_GKI_VFS_EXPORT_ONLY);
static int lookup_one_common(struct mnt_idmap *idmap,
const char *name, struct dentry *base, int len,
@@ -3165,7 +3165,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
mutex_lock(&p1->d_sb->s_vfs_rename_mutex);
return lock_two_directories(p1, p2);
}
EXPORT_SYMBOL(lock_rename);
EXPORT_SYMBOL_NS(lock_rename, ANDROID_GKI_VFS_EXPORT_ONLY);
/*
* c1 and p2 should be on the same fs.
@@ -3217,7 +3217,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
mutex_unlock(&p1->d_sb->s_vfs_rename_mutex);
}
}
EXPORT_SYMBOL(unlock_rename);
EXPORT_SYMBOL_NS(unlock_rename, ANDROID_GKI_VFS_EXPORT_ONLY);
/**
* vfs_prepare_mode - prepare the mode to be used for a new inode

View File

@@ -1478,7 +1478,7 @@ struct vfsmount *mntget(struct vfsmount *mnt)
mnt_add_count(real_mount(mnt), 1);
return mnt;
}
EXPORT_SYMBOL(mntget);
EXPORT_SYMBOL_NS_GPL(mntget, ANDROID_GKI_VFS_EXPORT_ONLY);
/*
* Make a mount point inaccessible to new lookups.

View File

@@ -1118,7 +1118,7 @@ struct file *dentry_open(const struct path *path, int flags,
}
return f;
}
EXPORT_SYMBOL(dentry_open);
EXPORT_SYMBOL_NS(dentry_open, ANDROID_GKI_VFS_EXPORT_ONLY);
/**
* dentry_create - Create and open a file

View File

@@ -57,7 +57,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
if (sizeof(i_size) > sizeof(long) || sizeof(i_blocks) > sizeof(long))
spin_unlock(&dst->i_lock);
}
EXPORT_SYMBOL_GPL(fsstack_copy_inode_size);
EXPORT_SYMBOL_NS_GPL(fsstack_copy_inode_size, ANDROID_GKI_VFS_EXPORT_ONLY);
/* copy all attributes */
void fsstack_copy_attr_all(struct inode *dest, const struct inode *src)

View File

@@ -206,7 +206,7 @@ int vfs_getattr(const struct path *path, struct kstat *stat,
return retval;
return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
EXPORT_SYMBOL_NS(vfs_getattr, ANDROID_GKI_VFS_EXPORT_ONLY);
/**
* vfs_fstat - Get the basic attributes by file descriptor

View File

@@ -92,7 +92,7 @@ int vfs_statfs(const struct path *path, struct kstatfs *buf)
buf->f_flags = calculate_f_flags(path->mnt);
return error;
}
EXPORT_SYMBOL(vfs_statfs);
EXPORT_SYMBOL_NS(vfs_statfs, ANDROID_GKI_VFS_EXPORT_ONLY);
int user_statfs(const char __user *pathname, struct kstatfs *st)
{

View File

@@ -1738,7 +1738,7 @@ struct dentry *mount_nodev(struct file_system_type *fs_type,
s->s_flags |= SB_ACTIVE;
return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
EXPORT_SYMBOL_NS(mount_nodev, ANDROID_GKI_VFS_EXPORT_ONLY);
int reconfigure_single(struct super_block *s,
int flags, void *data)

File diff suppressed because it is too large Load Diff

View File

@@ -63,40 +63,6 @@ btbcm_set_bdaddr
btbcm_setup_apple
btbcm_setup_patchram
btbcm_write_pcm_int_params
btintel_bootloader_setup_tlv
btintel_bootup
btintel_check_bdaddr
btintel_configure_setup
btintel_diagnostics
btintel_download_firmware
btintel_enter_mfg
btintel_exit_mfg
btintel_hw_error
btintel_load_ddc_config
btintel_parse_version_tlv
btintel_print_fseq_info
btintel_read_boot_params
btintel_read_version
btintel_recv_event
btintel_regmap_init
btintel_secure_send_result
btintel_send_intel_reset
btintel_set_bdaddr
btintel_set_diag
btintel_set_event_mask_mfg
btintel_set_msft_opcode
btintel_set_quality_report
btintel_shutdown_combined
btintel_version_info
btintel_version_info_tlv
btrtl_download_firmware
btrtl_free
btrtl_get_uart_settings
btrtl_initialize
btrtl_set_driver_name
btrtl_set_quirks
btrtl_setup_realtek
btrtl_shutdown_realtek
can_bus_off
can_change_mtu
can_change_state
@@ -188,6 +154,9 @@ hci_devcd_init
hci_devcd_register
hci_devcd_rx
hci_devcd_timeout
hci_drv_cmd_complete
hci_drv_cmd_status
hci_drv_process_cmd
hci_free_dev
hci_get_route
hci_mgmt_chan_register

View File

@@ -752,6 +752,7 @@
divider_ro_round_rate_parent
divider_round_rate_parent
dma_alloc_attrs
dma_alloc_noncontiguous
dma_alloc_pages
dma_async_device_register
dma_async_device_unregister
@@ -792,6 +793,7 @@
dma_fence_signal_timestamp_locked
dma_fence_wait_timeout
dma_free_attrs
dma_free_noncontiguous
dma_free_pages
dma_get_sgtable_attrs
dma_get_slave_channel
@@ -826,6 +828,8 @@
dma_unmap_page_attrs
dma_unmap_resource
dma_unmap_sg_attrs
dma_vmap_noncontiguous
dma_vunmap_noncontiguous
do_trace_netlink_extack
do_trace_rcu_torture_read
double_rq_lock

View File

@@ -598,6 +598,8 @@
gpiod_set_value_cansleep
hci_cmd_sync_cancel
hci_devcd_append_pattern
hci_drv_cmd_status
hci_drv_cmd_complete
hci_recv_diag
irq_modify_status
of_irq_get_byname

View File

@@ -115,6 +115,8 @@
__traceiter_android_vh_irqtime_account_process_tick
__traceiter_android_vh_lock_folio_drop_mmap_end
__traceiter_android_vh_lock_folio_drop_mmap_start
__traceiter_android_vh_lruvec_add_folio
__traceiter_android_vh_lruvec_del_folio
__traceiter_android_vh_mglru_aging_bypass
__traceiter_android_vh_mutex_unlock_slowpath
__traceiter_android_vh_mutex_unlock_slowpath_before_wakeq
@@ -196,6 +198,8 @@
__tracepoint_android_vh_irqtime_account_process_tick
__tracepoint_android_vh_lock_folio_drop_mmap_end
__tracepoint_android_vh_lock_folio_drop_mmap_start
__tracepoint_android_vh_lruvec_add_folio
__tracepoint_android_vh_lruvec_del_folio
__tracepoint_android_vh_mglru_aging_bypass
__tracepoint_android_vh_mutex_unlock_slowpath
__tracepoint_android_vh_mutex_unlock_slowpath_before_wakeq

View File

@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: MIT */
#ifndef DRM_CLIENT_SETUP_H
#define DRM_CLIENT_SETUP_H
#include <linux/types.h>
struct drm_device;
struct drm_format_info;
#if defined(CONFIG_DRM_CLIENT_SETUP)
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format);
void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc);
void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode);
#else
static inline void drm_client_setup(struct drm_device *dev,
const struct drm_format_info *format)
{ }
static inline void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc)
{ }
static inline void drm_client_setup_with_color_mode(struct drm_device *dev,
unsigned int color_mode)
{ }
#endif
#endif

View File

@@ -34,6 +34,8 @@
#include <drm/drm_device.h>
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_file;
struct drm_gem_object;
struct drm_master;
@@ -366,6 +368,22 @@ struct drm_driver {
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
/**
* @fbdev_probe
*
* Allocates and initialize the fb_info structure for fbdev emulation.
* Furthermore it also needs to allocate the DRM framebuffer used to
* back the fbdev.
*
* This callback is mandatory for fbdev support.
*
* Returns:
*
* 0 on success ot a negative error code otherwise.
*/
int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper,
struct drm_fb_helper_surface_size *sizes);
/**
* @show_fdinfo:
*

View File

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: MIT */
#ifndef DRM_FBDEV_CLIENT_H
#define DRM_FBDEV_CLIENT_H
struct drm_device;
struct drm_format_info;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format);
#else
static inline int drm_fbdev_client_setup(struct drm_device *dev,
const struct drm_format_info *format)
{
return 0;
}
#endif
#endif

View File

@@ -3,11 +3,24 @@
#ifndef DRM_FBDEV_TTM_H
#define DRM_FBDEV_TTM_H
#include <linux/stddef.h>
struct drm_device;
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
#ifdef CONFIG_DRM_FBDEV_EMULATION
int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
#define DRM_FBDEV_TTM_DRIVER_OPS \
.fbdev_probe = drm_fbdev_ttm_driver_fbdev_probe
void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp);
#else
#define DRM_FBDEV_TTM_DRIVER_OPS \
.fbdev_probe = NULL
static inline void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp)
{ }
#endif

View File

@@ -2,7 +2,7 @@
/*
* android_kabi.h - Android kernel abi abstraction header
*
* Copyright (C) 2020 Google, Inc.
* Copyright (C) 2020-2025 Google, Inc.
*
* Heavily influenced by rh_kabi.h which came from the RHEL/CENTOS kernel and
* was:
@@ -29,49 +29,49 @@
* If a field is added to a structure, the padding fields can be used to add
* the new field in a "safe" way.
*/
#ifndef _ANDROID_KABI_H
#define _ANDROID_KABI_H
#include <linux/args.h>
#include <linux/compiler.h>
#include <linux/compiler_attributes.h>
#include <linux/stringify.h>
/*
* Worker macros, don't use these, use the ones without a leading '_'
*/
#define __ANDROID_KABI_CHECK_SIZE_ALIGN(_orig, _new) \
union { \
_Static_assert(sizeof(struct{_new;}) <= sizeof(struct{_orig;}), \
__FILE__ ":" __stringify(__LINE__) ": " \
__stringify(_new) \
" is larger than " \
__stringify(_orig) ); \
_Static_assert(__alignof__(struct{_new;}) <= __alignof__(struct{_orig;}), \
__FILE__ ":" __stringify(__LINE__) ": " \
__stringify(_orig) \
" is not aligned the same as " \
__stringify(_new) ); \
#define _ANDROID_KABI_RULE(hint, target, value) \
static const char CONCATENATE(__gendwarfksyms_rule_, \
__COUNTER__)[] __used __aligned(1) \
__section(".discard.gendwarfksyms.kabi_rules") = \
"1\0" #hint "\0" #target "\0" #value
#define _ANDROID_KABI_NORMAL_SIZE_ALIGN(_orig, _new) \
union { \
_Static_assert( \
sizeof(struct { _new; }) <= \
sizeof(struct { _orig; }), \
FILE_LINE ": " __stringify(_new) \
" is larger than " __stringify(_orig)); \
_Static_assert( \
__alignof__(struct { _new; }) <= \
__alignof__(struct { _orig; }), \
FILE_LINE ": " __stringify(_orig) \
" is not aligned the same as " \
__stringify(_new)); \
}
#ifdef __GENKSYMS__
#define _ANDROID_KABI_REPLACE(_orig, _new) _orig
#else
#define _ANDROID_KABI_REPLACE(_orig, _new) \
union { \
_new; \
struct { \
_orig; \
}; \
__ANDROID_KABI_CHECK_SIZE_ALIGN(_orig, _new); \
#define _ANDROID_KABI_REPLACE(_orig, _new) \
union { \
_new; \
struct { \
_orig; \
}; \
_ANDROID_KABI_NORMAL_SIZE_ALIGN(_orig, _new); \
}
#endif /* __GENKSYMS__ */
#define _ANDROID_KABI_RESERVE(n) u64 android_kabi_reserved##n
/*
* Macros to use _before_ the ABI is frozen
@@ -84,44 +84,61 @@
* number: the "number" of the padding variable in the structure. Start with
* 1 and go up.
*/
#ifdef CONFIG_ANDROID_KABI_RESERVE
#define ANDROID_KABI_RESERVE(number) _ANDROID_KABI_RESERVE(number)
#else
#define ANDROID_KABI_RESERVE(number)
#endif
/*
* ANDROID_KABI_BACKPORT_OK
* Used to allow padding originally reserved with ANDROID_KABI_RESERVE
* to be used for backports of non-LTS patches by partners. These
* fields can by used by replacing with ANDROID_KABI_BACKPORT_USE()
* for partner backports.
*/
#define ANDROID_KABI_BACKPORT_OK(number) ANDROID_KABI_RESERVE(number)
#define ANDROID_KABI_RESERVE(number) u64 __kabi_reserved##number
/*
* Macros to use _after_ the ABI is frozen
*/
/*
* ANDROID_KABI_DECLONLY(fqn)
* Treat the struct/union/enum fqn as a declaration, i.e. even if
* a definition is available, don't expand the contents.
*/
#define ANDROID_KABI_DECLONLY(fqn) _ANDROID_KABI_RULE(declonly, fqn, /**/)
/*
* ANDROID_KABI_ENUMERATOR_IGNORE(fqn, field)
* When expanding enum fqn, skip the provided field. This makes it
* possible to hide added enum fields from versioning.
*/
#define ANDROID_KABI_ENUMERATOR_IGNORE(fqn, field) \
_ANDROID_KABI_RULE(enumerator_ignore, fqn field, /**/)
/*
* ANDROID_KABI_ENUMERATOR_VALUE(fqn, field, value)
* When expanding enum fqn, use the provided value for the
* specified field. This makes it possible to override enumerator
* values when calculating versions.
*/
#define ANDROID_KABI_ENUMERATOR_VALUE(fqn, field, value) \
_ANDROID_KABI_RULE(enumerator_value, fqn field, value)
/*
* ANDROID_KABI_IGNORE
* Add a new field that's ignored in versioning.
*/
#define ANDROID_KABI_IGNORE(n, _new) \
union { \
_new; \
unsigned char __kabi_ignored##n; \
}
/*
* ANDROID_KABI_REPLACE
* Replace a field with a compatible new field.
*/
#define ANDROID_KABI_REPLACE(_oldtype, _oldname, _new) \
_ANDROID_KABI_REPLACE(_oldtype __kabi_renamed##_oldname, struct { _new; })
/*
* ANDROID_KABI_USE(number, _new)
* Use a previous padding entry that was defined with ANDROID_KABI_RESERVE
* number: the previous "number" of the padding variable
* _new: the variable to use now instead of the padding variable
*/
#define ANDROID_KABI_USE(number, _new) \
_ANDROID_KABI_REPLACE(_ANDROID_KABI_RESERVE(number), _new)
/*
* ANDROID_KABI_BACKPORT_USE(number, _new)
* Use a previous padding entry that was defined with
* ANDROID_KABI_BACKPORT_OK(). This is functionally identical
* to ANDROID_KABI_USE() except that it differentiates the
* normal use of KABI fields for LTS from KABI fields that
* were released for use with other backports from upstream.
*/
#define ANDROID_KABI_BACKPORT_USE(number, _new) \
ANDROID_KABI_USE(number, _new)
#define ANDROID_KABI_USE(number, _new) \
_ANDROID_KABI_REPLACE(ANDROID_KABI_RESERVE(number), _new)
/*
* ANDROID_KABI_USE2(number, _new1, _new2)
@@ -130,8 +147,8 @@
* want to "burn" a 64bit padding variable for a smaller variable size if not
* needed.
*/
#define ANDROID_KABI_USE2(number, _new1, _new2) \
_ANDROID_KABI_REPLACE(_ANDROID_KABI_RESERVE(number), struct{ _new1; _new2; })
#define ANDROID_KABI_USE2(number, _new1, _new2) \
_ANDROID_KABI_REPLACE(ANDROID_KABI_RESERVE(number), struct{ _new1; _new2; })
#endif /* _ANDROID_KABI_H */

View File

@@ -324,6 +324,7 @@ struct cgroup_base_stat {
#ifdef CONFIG_SCHED_CORE
u64 forceidle_sum;
#endif
u64 ntime;
};
/*

View File

@@ -55,6 +55,8 @@ struct em_perf_table {
* struct em_perf_domain - Performance domain
* @em_table: Pointer to the runtime modifiable em_perf_table
* @nr_perf_states: Number of performance states
* @min_perf_state: Minimum allowed Performance State index
* @max_perf_state: Maximum allowed Performance State index
* @flags: See "em_perf_domain flags"
* @cpus: Cpumask covering the CPUs of the domain. It's here
* for performance reasons to avoid potential cache
@@ -70,6 +72,8 @@ struct em_perf_table {
struct em_perf_domain {
struct em_perf_table __rcu *em_table;
int nr_perf_states;
int min_perf_state;
int max_perf_state;
unsigned long flags;
unsigned long cpus[];
};
@@ -173,13 +177,14 @@ void em_table_free(struct em_perf_table __rcu *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int nr_states);
int em_dev_update_chip_binning(struct device *dev);
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz);
/**
* em_pd_get_efficient_state() - Get an efficient performance state from the EM
* @table: List of performance states, in ascending order
* @nr_perf_states: Number of performance states
* @pd: performance domain for which this must be done
* @max_util: Max utilization to map with the EM
* @pd_flags: Performance Domain flags
*
* It is called from the scheduler code quite frequently and as a consequence
* doesn't implement any check.
@@ -188,13 +193,16 @@ int em_dev_update_chip_binning(struct device *dev);
* requirement.
*/
static inline int
em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states,
unsigned long max_util, unsigned long pd_flags)
em_pd_get_efficient_state(struct em_perf_state *table,
struct em_perf_domain *pd, unsigned long max_util)
{
unsigned long pd_flags = pd->flags;
int min_ps = pd->min_perf_state;
int max_ps = pd->max_perf_state;
struct em_perf_state *ps;
int i;
for (i = 0; i < nr_perf_states; i++) {
for (i = min_ps; i <= max_ps; i++) {
ps = &table[i];
if (ps->performance >= max_util) {
if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
@@ -204,7 +212,7 @@ em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states,
}
}
return nr_perf_states - 1;
return max_ps;
}
/**
@@ -253,8 +261,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* requested performance.
*/
em_table = rcu_dereference(pd->em_table);
i = em_pd_get_efficient_state(em_table->state, pd->nr_perf_states,
max_util, pd->flags);
i = em_pd_get_efficient_state(em_table->state, pd, max_util);
ps = &em_table->state[i];
/*
@@ -391,6 +398,12 @@ static inline int em_dev_update_chip_binning(struct device *dev)
{
return -EINVAL;
}
static inline
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz)
{
return -EINVAL;
}
#endif
#endif

View File

@@ -91,7 +91,7 @@ struct fwnode_endpoint {
#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u"
#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
#define NR_FWNODE_REFERENCE_ARGS 8
#define NR_FWNODE_REFERENCE_ARGS 16
/**
* struct fwnode_reference_args - Fwnode reference with additional arguments

View File

@@ -526,6 +526,8 @@ struct gunyah_hypercall_vcpu_run_resp {
GUNYAH_VCPU_ADDRSPACE_VMMIO_WRITE = 5,
/* VCPU blocked on fault where we can demand page */
GUNYAH_VCPU_ADDRSPACE_PAGE_FAULT = 7,
/* VCPU is powered off due to some system event/reset */
GUNYAH_VCPU_STATE_SYSTEM_OFF = 0x100,
/* clang-format on */
} state;
u64 sized_state;

View File

@@ -323,7 +323,7 @@ struct mem_cgroup {
spinlock_t event_list_lock;
#endif /* CONFIG_MEMCG_V1 */
ANDROID_OEM_DATA(1);
ANDROID_OEM_DATA_ARRAY(1, 2);
struct mem_cgroup_per_node *nodeinfo[];
};

View File

@@ -10,6 +10,9 @@
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
/**
* folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
* @folio: The folio to test.
@@ -340,6 +343,11 @@ static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
enum lru_list lru = folio_lru_list(folio);
bool skip = false;
trace_android_vh_lruvec_add_folio(lruvec, folio, lru, false, &skip);
if (skip)
return;
if (lru_gen_add_folio(lruvec, folio, false))
return;
@@ -354,6 +362,11 @@ static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
enum lru_list lru = folio_lru_list(folio);
bool skip = false;
trace_android_vh_lruvec_add_folio(lruvec, folio, lru, true, &skip);
if (skip)
return;
if (lru_gen_add_folio(lruvec, folio, true))
return;
@@ -368,6 +381,11 @@ static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
enum lru_list lru = folio_lru_list(folio);
bool skip = false;
trace_android_vh_lruvec_del_folio(lruvec, folio, lru, &skip);
if (skip)
return;
if (lru_gen_del_folio(lruvec, folio, false))
return;

View File

@@ -18,6 +18,7 @@ struct percpu_rw_semaphore {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
ANDROID_OEM_DATA(1);
};
void _trace_android_vh_record_pcpu_rwsem_starttime(

View File

@@ -470,6 +470,7 @@ enum {
#define HCI_EVENT_PKT 0x04
#define HCI_ISODATA_PKT 0x05
#define HCI_DIAG_PKT 0xf0
#define HCI_DRV_PKT 0xf1
#define HCI_VENDOR_PKT 0xff
/* HCI packet types */

View File

@@ -31,6 +31,7 @@
#include <linux/rculist.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_drv.h>
#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/coredump.h>
@@ -606,6 +607,8 @@ struct hci_dev {
struct list_head monitored_devices;
bool advmon_pend_notify;
struct hci_drv *hci_drv;
#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
#endif
@@ -803,6 +806,7 @@ struct hci_conn_params {
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern struct mutex hci_cb_list_lock;
#define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
@@ -2005,47 +2009,24 @@ struct hci_cb {
char *name;
bool (*match) (struct hci_conn *conn);
void (*connect_cfm) (struct hci_conn *conn, __u8 status);
void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
void (*security_cfm) (struct hci_conn *conn, __u8 status,
__u8 encrypt);
__u8 encrypt);
void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
};
static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list)
{
struct hci_cb *cb, *cpy;
rcu_read_lock();
list_for_each_entry_rcu(cb, &hci_cb_list, list) {
if (cb->match && cb->match(conn)) {
cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC);
if (!cpy)
break;
*cpy = *cb;
INIT_LIST_HEAD(&cpy->list);
list_add_rcu(&cpy->list, list);
}
}
rcu_read_unlock();
}
static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
struct list_head list;
struct hci_cb *cb, *tmp;
struct hci_cb *cb;
INIT_LIST_HEAD(&list);
hci_cb_lookup(conn, &list);
list_for_each_entry_safe(cb, tmp, &list, list) {
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->connect_cfm)
cb->connect_cfm(conn, status);
kfree(cb);
}
mutex_unlock(&hci_cb_list_lock);
if (conn->connect_cfm_cb)
conn->connect_cfm_cb(conn, status);
@@ -2053,43 +2034,22 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
struct list_head list;
struct hci_cb *cb, *tmp;
struct hci_cb *cb;
INIT_LIST_HEAD(&list);
hci_cb_lookup(conn, &list);
list_for_each_entry_safe(cb, tmp, &list, list) {
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->disconn_cfm)
cb->disconn_cfm(conn, reason);
kfree(cb);
}
mutex_unlock(&hci_cb_list_lock);
if (conn->disconn_cfm_cb)
conn->disconn_cfm_cb(conn, reason);
}
static inline void hci_security_cfm(struct hci_conn *conn, __u8 status,
__u8 encrypt)
{
struct list_head list;
struct hci_cb *cb, *tmp;
INIT_LIST_HEAD(&list);
hci_cb_lookup(conn, &list);
list_for_each_entry_safe(cb, tmp, &list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
kfree(cb);
}
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
}
static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
__u8 encrypt;
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
@@ -2097,11 +2057,20 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
hci_security_cfm(conn, status, encrypt);
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
mutex_unlock(&hci_cb_list_lock);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
}
static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
struct hci_cb *cb;
__u8 encrypt;
if (conn->state == BT_CONFIG) {
@@ -2128,38 +2097,40 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
conn->sec_level = conn->pending_sec_level;
}
hci_security_cfm(conn, status, encrypt);
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->security_cfm)
cb->security_cfm(conn, status, encrypt);
}
mutex_unlock(&hci_cb_list_lock);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
}
static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
struct list_head list;
struct hci_cb *cb, *tmp;
struct hci_cb *cb;
INIT_LIST_HEAD(&list);
hci_cb_lookup(conn, &list);
list_for_each_entry_safe(cb, tmp, &list, list) {
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->key_change_cfm)
cb->key_change_cfm(conn, status);
kfree(cb);
}
mutex_unlock(&hci_cb_list_lock);
}
static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
__u8 role)
{
struct list_head list;
struct hci_cb *cb, *tmp;
struct hci_cb *cb;
INIT_LIST_HEAD(&list);
hci_cb_lookup(conn, &list);
list_for_each_entry_safe(cb, tmp, &list, list) {
mutex_lock(&hci_cb_list_lock);
list_for_each_entry(cb, &hci_cb_list, list) {
if (cb->role_switch_cfm)
cb->role_switch_cfm(conn, status, role);
kfree(cb);
}
mutex_unlock(&hci_cb_list_lock);
}
static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)

View File

@@ -0,0 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2025 Google Corporation
*/
#ifndef __HCI_DRV_H
#define __HCI_DRV_H
#include <linux/types.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
struct hci_drv_cmd_hdr {
__le16 opcode;
__le16 len;
} __packed;
struct hci_drv_ev_hdr {
__le16 opcode;
__le16 len;
} __packed;
#define HCI_DRV_EV_CMD_STATUS 0x0000
struct hci_drv_ev_cmd_status {
__le16 opcode;
__u8 status;
} __packed;
#define HCI_DRV_EV_CMD_COMPLETE 0x0001
struct hci_drv_ev_cmd_complete {
__le16 opcode;
__u8 status;
__u8 data[];
} __packed;
#define HCI_DRV_STATUS_SUCCESS 0x00
#define HCI_DRV_STATUS_UNSPECIFIED_ERROR 0x01
#define HCI_DRV_STATUS_UNKNOWN_COMMAND 0x02
#define HCI_DRV_STATUS_INVALID_PARAMETERS 0x03
#define HCI_DRV_MAX_DRIVER_NAME_LENGTH 32
/* Common commands that make sense on all drivers start from 0x0000 */
#define HCI_DRV_OP_READ_INFO 0x0000
#define HCI_DRV_READ_INFO_SIZE 0
struct hci_drv_rp_read_info {
__u8 driver_name[HCI_DRV_MAX_DRIVER_NAME_LENGTH];
__le16 num_supported_commands;
__le16 supported_commands[];
} __packed;
/* Driver specific OGF (Opcode Group Field)
* Commands in this group may have different meanings across different drivers.
*/
#define HCI_DRV_OGF_DRIVER_SPECIFIC 0x01
int hci_drv_cmd_status(struct hci_dev *hdev, u16 cmd, u8 status);
int hci_drv_cmd_complete(struct hci_dev *hdev, u16 cmd, u8 status, void *rp,
size_t rp_len);
int hci_drv_process_cmd(struct hci_dev *hdev, struct sk_buff *cmd_skb);
struct hci_drv_handler {
int (*func)(struct hci_dev *hdev, void *data, u16 data_len);
size_t data_len;
};
struct hci_drv {
size_t common_handler_count;
const struct hci_drv_handler *common_handlers;
size_t specific_handler_count;
const struct hci_drv_handler *specific_handlers;
};
#endif /* __HCI_DRV_H */

View File

@@ -51,6 +51,8 @@ struct hci_mon_hdr {
#define HCI_MON_CTRL_EVENT 17
#define HCI_MON_ISO_TX_PKT 18
#define HCI_MON_ISO_RX_PKT 19
#define HCI_MON_DRV_TX_PKT 20
#define HCI_MON_DRV_RX_PKT 21
struct hci_mon_new_index {
__u8 type;

View File

@@ -668,7 +668,7 @@ struct l2cap_conn {
struct l2cap_chan *smp;
struct list_head chan_l;
struct mutex chan_lock;
struct mutex lock;
struct kref ref;
struct list_head users;
};
@@ -970,6 +970,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err);
void l2cap_send_conn_req(struct l2cap_chan *chan);
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn);
struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *conn);
void l2cap_conn_put(struct l2cap_conn *conn);
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user);

View File

@@ -2737,6 +2737,7 @@ struct ib_device {
* It is a NULL terminated array.
*/
const struct attribute_group *groups[4];
u8 hw_stats_attr_index;
u64 uverbs_cmd_mask;

View File

@@ -351,9 +351,9 @@ DECLARE_HOOK(android_vh_filemap_read,
TP_PROTO(struct file *file, loff_t pos, size_t size),
TP_ARGS(file, pos, size));
DECLARE_HOOK(android_vh_filemap_map_pages,
TP_PROTO(struct file *file, pgoff_t first_pgoff,
TP_PROTO(struct file *file, pgoff_t orig_start_pgoff, pgoff_t first_pgoff,
pgoff_t last_pgoff, vm_fault_t ret),
TP_ARGS(file, first_pgoff, last_pgoff, ret));
TP_ARGS(file, orig_start_pgoff, first_pgoff, last_pgoff, ret));
DECLARE_HOOK(android_vh_page_cache_readahead_start,
TP_PROTO(struct file *file, pgoff_t pgoff,
unsigned int size, bool sync),
@@ -456,6 +456,14 @@ DECLARE_HOOK(android_vh_compact_finished,
DECLARE_HOOK(android_vh_madvise_cold_or_pageout_abort,
TP_PROTO(struct vm_area_struct *vma, bool *abort_madvise),
TP_ARGS(vma, abort_madvise));
DECLARE_HOOK(android_vh_lruvec_add_folio,
TP_PROTO(struct lruvec *lruvec, struct folio *folio, enum lru_list lru,
bool tail, bool *skip),
TP_ARGS(lruvec, folio, lru, tail, skip));
DECLARE_HOOK(android_vh_lruvec_del_folio,
TP_PROTO(struct lruvec *lruvec, struct folio *folio, enum lru_list lru,
bool *skip),
TP_ARGS(lruvec, folio, lru, skip));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */

View File

@@ -3,6 +3,7 @@
# Makefile for the linux kernel.
#
CFLAGS_cred.o = -DANDROID_GKI_VFS_EXPORT_ONLY=VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
obj-y = fork.o exec_domain.o panic.o \
cpu.o exit.o softirq.o resource.o \
sysctl.o capability.o ptrace.o user.o \

View File

@@ -444,6 +444,7 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
#ifdef CONFIG_SCHED_CORE
dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
#endif
dst_bstat->ntime += src_bstat->ntime;
}
static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
@@ -455,6 +456,7 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
#ifdef CONFIG_SCHED_CORE
dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
#endif
dst_bstat->ntime -= src_bstat->ntime;
}
static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
@@ -534,8 +536,10 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
switch (index) {
case CPUTIME_USER:
case CPUTIME_NICE:
rstatc->bstat.ntime += delta_exec;
fallthrough;
case CPUTIME_USER:
rstatc->bstat.cputime.utime += delta_exec;
break;
case CPUTIME_SYSTEM:
@@ -590,6 +594,7 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
#ifdef CONFIG_SCHED_CORE
bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
#endif
bstat->ntime += cpustat[CPUTIME_NICE];
}
}
@@ -607,32 +612,33 @@ static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
u64 usage, utime, stime;
struct cgroup_base_stat bstat;
if (cgroup_parent(cgrp)) {
cgroup_rstat_flush_hold(cgrp);
usage = cgrp->bstat.cputime.sum_exec_runtime;
bstat = cgrp->bstat;
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
&utime, &stime);
&bstat.cputime.utime, &bstat.cputime.stime);
cgroup_rstat_flush_release(cgrp);
} else {
/* cgrp->bstat of root is not actually used, reuse it */
root_cgroup_cputime(&cgrp->bstat);
usage = cgrp->bstat.cputime.sum_exec_runtime;
utime = cgrp->bstat.cputime.utime;
stime = cgrp->bstat.cputime.stime;
root_cgroup_cputime(&bstat);
}
do_div(usage, NSEC_PER_USEC);
do_div(utime, NSEC_PER_USEC);
do_div(stime, NSEC_PER_USEC);
do_div(bstat.cputime.sum_exec_runtime, NSEC_PER_USEC);
do_div(bstat.cputime.utime, NSEC_PER_USEC);
do_div(bstat.cputime.stime, NSEC_PER_USEC);
do_div(bstat.ntime, NSEC_PER_USEC);
seq_printf(seq, "usage_usec %llu\n"
"user_usec %llu\n"
"system_usec %llu\n",
usage, utime, stime);
"user_usec %llu\n"
"system_usec %llu\n"
"nice_usec %llu\n",
bstat.cputime.sum_exec_runtime,
bstat.cputime.utime,
bstat.cputime.stime,
bstat.ntime);
cgroup_force_idle_show(seq, &cgrp->bstat);
cgroup_force_idle_show(seq, &bstat);
}
/* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */

View File

@@ -251,7 +251,7 @@ error:
abort_creds(new);
return NULL;
}
EXPORT_SYMBOL(prepare_creds);
EXPORT_SYMBOL_NS(prepare_creds, ANDROID_GKI_VFS_EXPORT_ONLY);
/*
* Prepare credentials for current to perform an execve()
@@ -510,7 +510,7 @@ const struct cred *override_creds(const struct cred *new)
atomic_long_read(&old->usage));
return old;
}
EXPORT_SYMBOL(override_creds);
EXPORT_SYMBOL_NS(override_creds, ANDROID_GKI_VFS_EXPORT_ONLY);
/**
* revert_creds - Revert a temporary subjective credentials override
@@ -530,7 +530,7 @@ void revert_creds(const struct cred *old)
trace_android_rvh_revert_creds(current, old);
put_cred(override);
}
EXPORT_SYMBOL(revert_creds);
EXPORT_SYMBOL_NS(revert_creds, ANDROID_GKI_VFS_EXPORT_ONLY);
/**
* cred_fscmp - Compare two credentials with respect to filesystem access.

View File

@@ -628,6 +628,8 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
goto unlock;
dev->em_pd->flags |= flags;
dev->em_pd->min_perf_state = 0;
dev->em_pd->max_perf_state = nr_states - 1;
em_cpufreq_update_efficiencies(dev, dev->em_pd->em_table->state);
@@ -856,3 +858,53 @@ int em_dev_update_chip_binning(struct device *dev)
return em_recalc_and_update(dev, pd, em_table);
}
EXPORT_SYMBOL_GPL(em_dev_update_chip_binning);
/**
* em_update_performance_limits() - Update Energy Model with performance
* limits information.
* @pd : Performance Domain with EM that has to be updated.
* @freq_min_khz : New minimum allowed frequency for this device.
* @freq_max_khz : New maximum allowed frequency for this device.
*
* This function allows to update the EM with information about available
* performance levels. It takes the minimum and maximum frequency in kHz
* and does internal translation to performance levels.
* Returns 0 on success or -EINVAL when failed.
*/
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz)
{
struct em_perf_state *table;
int min_ps = -1;
int max_ps = -1;
int i;
if (!pd)
return -EINVAL;
rcu_read_lock();
table = em_perf_state_from_pd(pd);
for (i = 0; i < pd->nr_perf_states; i++) {
if (freq_min_khz == table[i].frequency)
min_ps = i;
if (freq_max_khz == table[i].frequency)
max_ps = i;
}
rcu_read_unlock();
/* Only update when both are found and sane */
if (min_ps < 0 || max_ps < 0 || max_ps < min_ps)
return -EINVAL;
/* Guard simultaneous updates and make them atomic */
mutex_lock(&em_pd_mutex);
pd->min_perf_state = min_ps;
pd->max_perf_state = max_ps;
mutex_unlock(&em_pd_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(em_update_performance_limits);

View File

@@ -3700,12 +3700,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
unsigned long rss = 0;
unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type;
pgoff_t first_pgoff = 0;
pgoff_t orig_start_pgoff = start_pgoff;
rcu_read_lock();
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
if (!folio)
goto out;
first_pgoff = xas.xa_index;
orig_start_pgoff = xas.xa_index;
if (filemap_map_pmd(vmf, folio, start_pgoff)) {
ret = VM_FAULT_NOPAGE;
@@ -3756,7 +3758,8 @@ out:
WRITE_ONCE(file->f_ra.mmap_miss, 0);
else
WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
trace_android_vh_filemap_map_pages(file, first_pgoff, last_pgoff, ret);
trace_android_vh_filemap_map_pages(file, orig_start_pgoff,
first_pgoff, last_pgoff, ret);
return ret;
}

View File

@@ -12,11 +12,8 @@ _COMMON_GKI_MODULES_LIST = [
"drivers/block/virtio_blk.ko",
"drivers/block/zram/zram.ko",
"drivers/bluetooth/btbcm.ko",
"drivers/bluetooth/btintel.ko",
"drivers/bluetooth/btqca.ko",
"drivers/bluetooth/btrtl.ko",
"drivers/bluetooth/btsdio.ko",
"drivers/bluetooth/btusb.ko",
"drivers/bluetooth/hci_uart.ko",
"drivers/char/virtio_console.ko",
"drivers/gnss/gnss.ko",

View File

@@ -14,7 +14,8 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o
ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o \
hci_drv.o
bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o

View File

@@ -57,6 +57,7 @@ DEFINE_RWLOCK(hci_dev_list_lock);
/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
@@ -2930,6 +2931,8 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
break;
case HCI_ISODATA_PKT:
break;
case HCI_DRV_PKT:
break;
default:
kfree_skb(skb);
return -EINVAL;
@@ -2992,7 +2995,9 @@ int hci_register_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
list_add_tail_rcu(&cb->list, &hci_cb_list);
mutex_lock(&hci_cb_list_lock);
list_add_tail(&cb->list, &hci_cb_list);
mutex_unlock(&hci_cb_list_lock);
return 0;
}
@@ -3002,8 +3007,9 @@ int hci_unregister_cb(struct hci_cb *cb)
{
BT_DBG("%p name %s", cb, cb->name);
list_del_rcu(&cb->list);
synchronize_rcu();
mutex_lock(&hci_cb_list_lock);
list_del(&cb->list);
mutex_unlock(&hci_cb_list_lock);
return 0;
}
@@ -3035,6 +3041,15 @@ static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return -EINVAL;
}
if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) {
/* Intercept HCI Drv packet here and don't go with hdev->send
* callback.
*/
err = hci_drv_process_cmd(hdev, skb);
kfree_skb(skb);
return err;
}
err = hdev->send(hdev, skb);
if (err < 0) {
bt_dev_err(hdev, "sending frame failed (%d)", err);

105
net/bluetooth/hci_drv.c Normal file
View File

@@ -0,0 +1,105 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2025 Google Corporation
*/
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_drv.h>
int hci_drv_cmd_status(struct hci_dev *hdev, u16 cmd, u8 status)
{
struct hci_drv_ev_hdr *hdr;
struct hci_drv_ev_cmd_status *ev;
struct sk_buff *skb;
skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = skb_put(skb, sizeof(*hdr));
hdr->opcode = __cpu_to_le16(HCI_DRV_EV_CMD_STATUS);
hdr->len = __cpu_to_le16(sizeof(*ev));
ev = skb_put(skb, sizeof(*ev));
ev->opcode = __cpu_to_le16(cmd);
ev->status = status;
hci_skb_pkt_type(skb) = HCI_DRV_PKT;
return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_drv_cmd_status);
int hci_drv_cmd_complete(struct hci_dev *hdev, u16 cmd, u8 status, void *rp,
size_t rp_len)
{
struct hci_drv_ev_hdr *hdr;
struct hci_drv_ev_cmd_complete *ev;
struct sk_buff *skb;
skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = skb_put(skb, sizeof(*hdr));
hdr->opcode = __cpu_to_le16(HCI_DRV_EV_CMD_COMPLETE);
hdr->len = __cpu_to_le16(sizeof(*ev) + rp_len);
ev = skb_put(skb, sizeof(*ev));
ev->opcode = __cpu_to_le16(cmd);
ev->status = status;
skb_put_data(skb, rp, rp_len);
hci_skb_pkt_type(skb) = HCI_DRV_PKT;
return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_drv_cmd_complete);
int hci_drv_process_cmd(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_drv_cmd_hdr *hdr;
const struct hci_drv_handler *handler = NULL;
u16 opcode, len, ogf, ocf;
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr)
return -EILSEQ;
opcode = __le16_to_cpu(hdr->opcode);
len = __le16_to_cpu(hdr->len);
if (len != skb->len)
return -EILSEQ;
ogf = hci_opcode_ogf(opcode);
ocf = hci_opcode_ocf(opcode);
if (!hdev->hci_drv)
return hci_drv_cmd_status(hdev, opcode,
HCI_DRV_STATUS_UNKNOWN_COMMAND);
if (ogf != HCI_DRV_OGF_DRIVER_SPECIFIC) {
if (opcode < hdev->hci_drv->common_handler_count)
handler = &hdev->hci_drv->common_handlers[opcode];
} else {
if (ocf < hdev->hci_drv->specific_handler_count)
handler = &hdev->hci_drv->specific_handlers[ocf];
}
if (!handler || !handler->func)
return hci_drv_cmd_status(hdev, opcode,
HCI_DRV_STATUS_UNKNOWN_COMMAND);
if (len != handler->data_len)
return hci_drv_cmd_status(hdev, opcode,
HCI_DRV_STATUS_INVALID_PARAMETERS);
return handler->func(hdev, skb->data, len);
}
EXPORT_SYMBOL(hci_drv_process_cmd);

View File

@@ -234,7 +234,8 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
hci_skb_pkt_type(skb) != HCI_ISODATA_PKT &&
hci_skb_pkt_type(skb) != HCI_DRV_PKT)
continue;
} else {
/* Don't send frame to other channel types */
@@ -391,6 +392,12 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
else
opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
break;
case HCI_DRV_PKT:
if (bt_cb(skb)->incoming)
opcode = cpu_to_le16(HCI_MON_DRV_RX_PKT);
else
opcode = cpu_to_le16(HCI_MON_DRV_TX_PKT);
break;
case HCI_DIAG_PKT:
opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
break;
@@ -1860,7 +1867,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
hci_skb_pkt_type(skb) != HCI_ISODATA_PKT &&
hci_skb_pkt_type(skb) != HCI_DRV_PKT) {
err = -EINVAL;
goto drop;
}

View File

@@ -2137,11 +2137,6 @@ done:
return HCI_LM_ACCEPT;
}
static bool iso_match(struct hci_conn *hcon)
{
return hcon->type == ISO_LINK || hcon->type == LE_LINK;
}
static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
{
if (hcon->type != ISO_LINK) {
@@ -2323,7 +2318,6 @@ drop:
static struct hci_cb iso_cb = {
.name = "ISO",
.match = iso_match,
.connect_cfm = iso_connect_cfm,
.disconn_cfm = iso_disconn_cfm,
};

View File

@@ -119,7 +119,6 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
{
struct l2cap_chan *c;
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_scid(conn, cid);
if (c) {
/* Only lock if chan reference is not 0 */
@@ -127,7 +126,6 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
if (c)
l2cap_chan_lock(c);
}
mutex_unlock(&conn->chan_lock);
return c;
}
@@ -140,7 +138,6 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
{
struct l2cap_chan *c;
mutex_lock(&conn->chan_lock);
c = __l2cap_get_chan_by_dcid(conn, cid);
if (c) {
/* Only lock if chan reference is not 0 */
@@ -148,7 +145,6 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
if (c)
l2cap_chan_lock(c);
}
mutex_unlock(&conn->chan_lock);
return c;
}
@@ -418,7 +414,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
if (!conn)
return;
mutex_lock(&conn->chan_lock);
mutex_lock(&conn->lock);
/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
* this work. No need to call l2cap_chan_hold(chan) here again.
*/
@@ -439,7 +435,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
mutex_unlock(&conn->lock);
}
struct l2cap_chan *l2cap_chan_create(void)
@@ -642,9 +638,9 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
mutex_lock(&conn->chan_lock);
mutex_lock(&conn->lock);
__l2cap_chan_add(conn, chan);
mutex_unlock(&conn->chan_lock);
mutex_unlock(&conn->lock);
}
void l2cap_chan_del(struct l2cap_chan *chan, int err)
@@ -732,9 +728,9 @@ void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
if (!conn)
return;
mutex_lock(&conn->chan_lock);
mutex_lock(&conn->lock);
__l2cap_chan_list(conn, func, data);
mutex_unlock(&conn->chan_lock);
mutex_unlock(&conn->lock);
}
EXPORT_SYMBOL_GPL(l2cap_chan_list);
@@ -746,7 +742,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work)
struct hci_conn *hcon = conn->hcon;
struct l2cap_chan *chan;
mutex_lock(&conn->chan_lock);
mutex_lock(&conn->lock);
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
@@ -755,7 +751,7 @@ static void l2cap_conn_update_id_addr(struct work_struct *work)
l2cap_chan_unlock(chan);
}
mutex_unlock(&conn->chan_lock);
mutex_unlock(&conn->lock);
}
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
@@ -1508,8 +1504,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
BT_DBG("conn %p", conn);
mutex_lock(&conn->chan_lock);
list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
l2cap_chan_lock(chan);
@@ -1578,8 +1572,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
l2cap_chan_unlock(chan);
}
mutex_unlock(&conn->chan_lock);
}
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
@@ -1625,7 +1617,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
if (hcon->type == ACL_LINK)
l2cap_request_info(conn);
mutex_lock(&conn->chan_lock);
mutex_lock(&conn->lock);
list_for_each_entry(chan, &conn->chan_l, list) {
@@ -1643,7 +1635,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
l2cap_chan_unlock(chan);
}
mutex_unlock(&conn->chan_lock);
mutex_unlock(&conn->lock);
if (hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);
@@ -1658,14 +1650,10 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
BT_DBG("conn %p", conn);
mutex_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
l2cap_chan_set_err(chan, err);
}
mutex_unlock(&conn->chan_lock);
}
static void l2cap_info_timeout(struct work_struct *work)
@@ -1676,7 +1664,9 @@ static void l2cap_info_timeout(struct work_struct *work)
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
mutex_lock(&conn->lock);
l2cap_conn_start(conn);
mutex_unlock(&conn->lock);
}
/*
@@ -1768,6 +1758,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
mutex_lock(&conn->lock);
kfree_skb(conn->rx_skb);
skb_queue_purge(&conn->pending_rx);
@@ -1786,8 +1778,6 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
/* Force the connection to be immediately dropped */
hcon->disc_timeout = 0;
mutex_lock(&conn->chan_lock);
/* Kill channels */
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
l2cap_chan_hold(chan);
@@ -1801,12 +1791,14 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_put(chan);
}
mutex_unlock(&conn->chan_lock);
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
cancel_delayed_work_sync(&conn->info_timer);
hci_chan_del(conn->hchan);
conn->hchan = NULL;
hcon->l2cap_data = NULL;
mutex_unlock(&conn->lock);
l2cap_conn_put(conn);
}
@@ -1814,7 +1806,6 @@ static void l2cap_conn_free(struct kref *ref)
{
struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
hci_chan_del(conn->hchan);
hci_conn_put(conn->hcon);
kfree(conn);
}
@@ -2925,8 +2916,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
BT_DBG("conn %p", conn);
mutex_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
if (chan->chan_type != L2CAP_CHAN_RAW)
continue;
@@ -2941,8 +2930,6 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
if (chan->ops->recv(chan, nskb))
kfree_skb(nskb);
}
mutex_unlock(&conn->chan_lock);
}
/* ---- L2CAP signalling commands ---- */
@@ -3965,7 +3952,6 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
goto response;
}
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(pchan);
/* Check if the ACL is secure enough (if not SDP) */
@@ -4072,7 +4058,6 @@ response:
}
l2cap_chan_unlock(pchan);
mutex_unlock(&conn->chan_lock);
l2cap_chan_put(pchan);
}
@@ -4111,27 +4096,19 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
dcid, scid, result, status);
mutex_lock(&conn->chan_lock);
if (scid) {
chan = __l2cap_get_chan_by_scid(conn, scid);
if (!chan) {
err = -EBADSLT;
goto unlock;
}
if (!chan)
return -EBADSLT;
} else {
chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
if (!chan) {
err = -EBADSLT;
goto unlock;
}
if (!chan)
return -EBADSLT;
}
chan = l2cap_chan_hold_unless_zero(chan);
if (!chan) {
err = -EBADSLT;
goto unlock;
}
if (!chan)
return -EBADSLT;
err = 0;
@@ -4169,9 +4146,6 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
unlock:
mutex_unlock(&conn->chan_lock);
return err;
}
@@ -4459,11 +4433,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
chan->ops->set_shutdown(chan);
l2cap_chan_unlock(chan);
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(chan);
l2cap_chan_del(chan, ECONNRESET);
mutex_unlock(&conn->chan_lock);
chan->ops->close(chan);
@@ -4500,11 +4470,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
return 0;
}
l2cap_chan_unlock(chan);
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(chan);
l2cap_chan_del(chan, 0);
mutex_unlock(&conn->chan_lock);
chan->ops->close(chan);
@@ -4702,13 +4668,9 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
dcid, mtu, mps, credits, result);
mutex_lock(&conn->chan_lock);
chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
if (!chan) {
err = -EBADSLT;
goto unlock;
}
if (!chan)
return -EBADSLT;
err = 0;
@@ -4756,9 +4718,6 @@ static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
l2cap_chan_unlock(chan);
unlock:
mutex_unlock(&conn->chan_lock);
return err;
}
@@ -4870,7 +4829,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
goto response;
}
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(pchan);
if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
@@ -4936,7 +4894,6 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
response_unlock:
l2cap_chan_unlock(pchan);
mutex_unlock(&conn->chan_lock);
l2cap_chan_put(pchan);
if (result == L2CAP_CR_PEND)
@@ -5070,7 +5027,6 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
goto response;
}
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(pchan);
if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
@@ -5145,7 +5101,6 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
unlock:
l2cap_chan_unlock(pchan);
mutex_unlock(&conn->chan_lock);
l2cap_chan_put(pchan);
response:
@@ -5182,8 +5137,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
result);
mutex_lock(&conn->chan_lock);
cmd_len -= sizeof(*rsp);
list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
@@ -5269,8 +5222,6 @@ static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
l2cap_chan_unlock(chan);
}
mutex_unlock(&conn->chan_lock);
return err;
}
@@ -5383,8 +5334,6 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
if (cmd_len < sizeof(*rej))
return -EPROTO;
mutex_lock(&conn->chan_lock);
chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
if (!chan)
goto done;
@@ -5399,7 +5348,6 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
l2cap_chan_put(chan);
done:
mutex_unlock(&conn->chan_lock);
return 0;
}
@@ -6854,8 +6802,12 @@ static void process_pending_rx(struct work_struct *work)
BT_DBG("");
mutex_lock(&conn->lock);
while ((skb = skb_dequeue(&conn->pending_rx)))
l2cap_recv_frame(conn, skb);
mutex_unlock(&conn->lock);
}
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
@@ -6894,7 +6846,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
mutex_init(&conn->ident_lock);
mutex_init(&conn->chan_lock);
mutex_init(&conn->lock);
INIT_LIST_HEAD(&conn->chan_l);
INIT_LIST_HEAD(&conn->users);
@@ -7085,7 +7037,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
}
}
mutex_lock(&conn->chan_lock);
mutex_lock(&conn->lock);
l2cap_chan_lock(chan);
if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
@@ -7126,7 +7078,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
chan_unlock:
l2cap_chan_unlock(chan);
mutex_unlock(&conn->chan_lock);
mutex_unlock(&conn->lock);
done:
hci_dev_unlock(hdev);
hci_dev_put(hdev);
@@ -7230,11 +7182,6 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
return NULL;
}
static bool l2cap_match(struct hci_conn *hcon)
{
return hcon->type == ACL_LINK || hcon->type == LE_LINK;
}
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
struct hci_dev *hdev = hcon->hdev;
@@ -7242,6 +7189,9 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_chan *pchan;
u8 dst_type;
if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
return;
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (status) {
@@ -7306,6 +7256,9 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
return;
BT_DBG("hcon %p reason %d", hcon, reason);
l2cap_conn_del(hcon, bt_to_errno(reason));
@@ -7338,7 +7291,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
mutex_lock(&conn->chan_lock);
mutex_lock(&conn->lock);
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
@@ -7412,7 +7365,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
l2cap_chan_unlock(chan);
}
mutex_unlock(&conn->chan_lock);
mutex_unlock(&conn->lock);
}
/* Append fragment into frame respecting the maximum len of rx_skb */
@@ -7479,8 +7432,11 @@ static void l2cap_recv_reset(struct l2cap_conn *conn)
conn->rx_len = 0;
}
static struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
{
if (!c)
return NULL;
BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
if (!kref_get_unless_zero(&c->ref))
@@ -7506,11 +7462,15 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
hci_dev_unlock(hcon->hdev);
if (!conn)
goto drop;
if (!conn) {
kfree_skb(skb);
return;
}
BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
mutex_lock(&conn->lock);
switch (flags) {
case ACL_START:
case ACL_START_NO_FLUSH:
@@ -7535,7 +7495,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (len == skb->len) {
/* Complete frame received */
l2cap_recv_frame(conn, skb);
return;
goto unlock;
}
BT_DBG("Start: total len %d, frag len %u", len, skb->len);
@@ -7597,15 +7557,15 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
break;
}
l2cap_conn_put(conn);
drop:
kfree_skb(skb);
unlock:
mutex_unlock(&conn->lock);
l2cap_conn_put(conn);
}
static struct hci_cb l2cap_cb = {
.name = "L2CAP",
.match = l2cap_match,
.connect_cfm = l2cap_connect_cfm,
.disconn_cfm = l2cap_disconn_cfm,
.security_cfm = l2cap_security_cfm,

View File

@@ -1326,9 +1326,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
/* prevent sk structure from being freed whilst unlocked */
sock_hold(sk);
chan = l2cap_pi(sk)->chan;
/* prevent chan structure from being freed whilst unlocked */
l2cap_chan_hold(chan);
chan = l2cap_chan_hold_unless_zero(l2cap_pi(sk)->chan);
if (!chan)
goto shutdown_already;
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
@@ -1358,22 +1359,20 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
release_sock(sk);
l2cap_chan_lock(chan);
conn = chan->conn;
if (conn)
/* prevent conn structure from being freed */
l2cap_conn_get(conn);
/* prevent conn structure from being freed */
conn = l2cap_conn_hold_unless_zero(chan->conn);
l2cap_chan_unlock(chan);
if (conn)
/* mutex lock must be taken before l2cap_chan_lock() */
mutex_lock(&conn->chan_lock);
mutex_lock(&conn->lock);
l2cap_chan_lock(chan);
l2cap_chan_close(chan, 0);
l2cap_chan_unlock(chan);
if (conn) {
mutex_unlock(&conn->chan_lock);
mutex_unlock(&conn->lock);
l2cap_conn_put(conn);
}

View File

@@ -2134,11 +2134,6 @@ static int rfcomm_run(void *unused)
return 0;
}
static bool rfcomm_match(struct hci_conn *hcon)
{
return hcon->type == ACL_LINK;
}
static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
{
struct rfcomm_session *s;
@@ -2185,7 +2180,6 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
static struct hci_cb rfcomm_cb = {
.name = "RFCOMM",
.match = rfcomm_match,
.security_cfm = rfcomm_security_cfm
};

View File

@@ -1355,13 +1355,11 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
return lm;
}
static bool sco_match(struct hci_conn *hcon)
{
return hcon->type == SCO_LINK || hcon->type == ESCO_LINK;
}
static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return;
BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status);
if (!status) {
@@ -1376,6 +1374,9 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return;
BT_DBG("hcon %p reason %d", hcon, reason);
sco_conn_del(hcon, bt_to_errno(reason));
@@ -1401,7 +1402,6 @@ drop:
static struct hci_cb sco_cb = {
.name = "SCO",
.match = sco_match,
.connect_cfm = sco_connect_cfm,
.disconn_cfm = sco_disconn_cfm,
};

View File

@@ -287,8 +287,8 @@ int can_send(struct sk_buff *skb, int loop)
netif_rx(newskb);
/* update statistics */
pkg_stats->tx_frames++;
pkg_stats->tx_frames_delta++;
atomic_long_inc(&pkg_stats->tx_frames);
atomic_long_inc(&pkg_stats->tx_frames_delta);
return 0;
@@ -647,8 +647,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
int matches;
/* update statistics */
pkg_stats->rx_frames++;
pkg_stats->rx_frames_delta++;
atomic_long_inc(&pkg_stats->rx_frames);
atomic_long_inc(&pkg_stats->rx_frames_delta);
/* create non-zero unique skb identifier together with *skb */
while (!(can_skb_prv(skb)->skbcnt))
@@ -669,8 +669,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
consume_skb(skb);
if (matches > 0) {
pkg_stats->matches++;
pkg_stats->matches_delta++;
atomic_long_inc(&pkg_stats->matches);
atomic_long_inc(&pkg_stats->matches_delta);
}
}

View File

@@ -66,9 +66,9 @@ struct receiver {
struct can_pkg_stats {
unsigned long jiffies_init;
unsigned long rx_frames;
unsigned long tx_frames;
unsigned long matches;
atomic_long_t rx_frames;
atomic_long_t tx_frames;
atomic_long_t matches;
unsigned long total_rx_rate;
unsigned long total_tx_rate;
@@ -82,9 +82,9 @@ struct can_pkg_stats {
unsigned long max_tx_rate;
unsigned long max_rx_match_ratio;
unsigned long rx_frames_delta;
unsigned long tx_frames_delta;
unsigned long matches_delta;
atomic_long_t rx_frames_delta;
atomic_long_t tx_frames_delta;
atomic_long_t matches_delta;
};
/* persistent statistics */

View File

@@ -118,6 +118,13 @@ void can_stat_update(struct timer_list *t)
struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
unsigned long j = jiffies; /* snapshot */
long rx_frames = atomic_long_read(&pkg_stats->rx_frames);
long tx_frames = atomic_long_read(&pkg_stats->tx_frames);
long matches = atomic_long_read(&pkg_stats->matches);
long rx_frames_delta = atomic_long_read(&pkg_stats->rx_frames_delta);
long tx_frames_delta = atomic_long_read(&pkg_stats->tx_frames_delta);
long matches_delta = atomic_long_read(&pkg_stats->matches_delta);
/* restart counting in timer context on user request */
if (user_reset)
can_init_stats(net);
@@ -127,35 +134,33 @@ void can_stat_update(struct timer_list *t)
can_init_stats(net);
/* prevent overflow in calc_rate() */
if (pkg_stats->rx_frames > (ULONG_MAX / HZ))
if (rx_frames > (LONG_MAX / HZ))
can_init_stats(net);
/* prevent overflow in calc_rate() */
if (pkg_stats->tx_frames > (ULONG_MAX / HZ))
if (tx_frames > (LONG_MAX / HZ))
can_init_stats(net);
/* matches overflow - very improbable */
if (pkg_stats->matches > (ULONG_MAX / 100))
if (matches > (LONG_MAX / 100))
can_init_stats(net);
/* calc total values */
if (pkg_stats->rx_frames)
pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) /
pkg_stats->rx_frames;
if (rx_frames)
pkg_stats->total_rx_match_ratio = (matches * 100) / rx_frames;
pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j,
pkg_stats->tx_frames);
tx_frames);
pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j,
pkg_stats->rx_frames);
rx_frames);
/* calc current values */
if (pkg_stats->rx_frames_delta)
if (rx_frames_delta)
pkg_stats->current_rx_match_ratio =
(pkg_stats->matches_delta * 100) /
pkg_stats->rx_frames_delta;
(matches_delta * 100) / rx_frames_delta;
pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta);
pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta);
pkg_stats->current_tx_rate = calc_rate(0, HZ, tx_frames_delta);
pkg_stats->current_rx_rate = calc_rate(0, HZ, rx_frames_delta);
/* check / update maximum values */
if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate)
@@ -168,9 +173,9 @@ void can_stat_update(struct timer_list *t)
pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio;
/* clear values for 'current rate' calculation */
pkg_stats->tx_frames_delta = 0;
pkg_stats->rx_frames_delta = 0;
pkg_stats->matches_delta = 0;
atomic_long_set(&pkg_stats->tx_frames_delta, 0);
atomic_long_set(&pkg_stats->rx_frames_delta, 0);
atomic_long_set(&pkg_stats->matches_delta, 0);
/* restart timer (one second) */
mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ));
@@ -214,9 +219,12 @@ static int can_stats_proc_show(struct seq_file *m, void *v)
struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
seq_putc(m, '\n');
seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames);
seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames);
seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches);
seq_printf(m, " %8ld transmitted frames (TXF)\n",
atomic_long_read(&pkg_stats->tx_frames));
seq_printf(m, " %8ld received frames (RXF)\n",
atomic_long_read(&pkg_stats->rx_frames));
seq_printf(m, " %8ld matched frames (RXMF)\n",
atomic_long_read(&pkg_stats->matches));
seq_putc(m, '\n');

View File

@@ -389,3 +389,4 @@ static void __exit exit_dns_resolver(void)
module_init(init_dns_resolver)
module_exit(exit_dns_resolver)
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);