Merge 8e7bb66360 ("can: flexcan: add NXP S32G2/S32G3 SoC support") into android16-6.12-lts

Steps on the way to 6.12.24

Change-Id: I54cb46effb1fb5a2ed725fea8a4166800602958f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-05-08 16:29:55 +00:00
65 changed files with 1346 additions and 332 deletions

View File

@@ -3054,6 +3054,8 @@
* max_sec_lba48: Set or clear transfer size limit to
65535 sectors.
* external: Mark port as external (hotplug-capable).
* [no]lpm: Enable or disable link power management.
* [no]setxfer: Indicate if transfer speed mode setting

View File

@@ -119,6 +119,7 @@
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
#define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
@@ -195,6 +196,7 @@
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)

View File

@@ -2435,18 +2435,20 @@ config CC_HAS_NAMED_AS
def_bool $(success,echo 'int __seg_fs fs; int __seg_gs gs;' | $(CC) -x c - -S -o /dev/null)
depends on CC_IS_GCC
#
# -fsanitize=kernel-address (KASAN) and -fsanitize=thread (KCSAN)
# are incompatible with named address spaces with GCC < 13.3
# (see GCC PR sanitizer/111736 and also PR sanitizer/115172).
#
config CC_HAS_NAMED_AS_FIXED_SANITIZERS
def_bool CC_IS_GCC && GCC_VERSION >= 130300
def_bool y
depends on !(KASAN || KCSAN) || GCC_VERSION >= 130300
depends on !(UBSAN_BOOL && KASAN) || GCC_VERSION >= 140200
config USE_X86_SEG_SUPPORT
def_bool y
depends on CC_HAS_NAMED_AS
#
# -fsanitize=kernel-address (KASAN) and -fsanitize=thread
# (KCSAN) are incompatible with named address spaces with
# GCC < 13.3 - see GCC PR sanitizer/111736.
#
depends on !(KASAN || KCSAN) || CC_HAS_NAMED_AS_FIXED_SANITIZERS
def_bool CC_HAS_NAMED_AS
depends on CC_HAS_NAMED_AS_FIXED_SANITIZERS
config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)

View File

@@ -627,7 +627,7 @@ static void init_amd_k8(struct cpuinfo_x86 *c)
* (model = 0x14) and later actually support it.
* (AMD Erratum #110, docId: 25759).
*/
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
if (!rdmsrl_amd_safe(0xc001100d, &value)) {
value &= ~BIT_64(32);

View File

@@ -33,25 +33,55 @@
#include <asm/smap.h>
#include <asm/gsseg.h>
/*
* The first GDT descriptor is reserved as 'NULL descriptor'. As bits 0
* and 1 of a segment selector, i.e., the RPL bits, are NOT used to index
* GDT, selector values 0~3 all point to the NULL descriptor, thus values
* 0, 1, 2 and 3 are all valid NULL selector values.
*
* However IRET zeros ES, FS, GS, and DS segment registers if any of them
* is found to have any nonzero NULL selector value, which can be used by
* userspace in pre-FRED systems to spot any interrupt/exception by loading
* a nonzero NULL selector and waiting for it to become zero. Before FRED
* there was nothing software could do to prevent such an information leak.
*
* ERETU, the only legit instruction to return to userspace from kernel
* under FRED, by design does NOT zero any segment register to avoid this
* problem behavior.
*
* As such, leave NULL selector values 0~3 unchanged.
*/
/*
 * Force RPL to 3 on any non-NULL selector; selector values 0-3 all
 * index the NULL GDT descriptor and are passed through unchanged
 * (see the block comment above for the FRED/ERETU rationale).
 */
static inline u16 fixup_rpl(u16 sel)
{
	if (sel <= 3)
		return sel;

	return sel | 3;
}
#ifdef CONFIG_IA32_EMULATION
#include <asm/unistd_32_ia32.h>
static inline void reload_segments(struct sigcontext_32 *sc)
{
unsigned int cur;
u16 cur;
/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
savesegment(gs, cur);
if ((sc->gs | 0x03) != cur)
load_gs_index(sc->gs | 0x03);
if (fixup_rpl(sc->gs) != cur)
load_gs_index(fixup_rpl(sc->gs));
savesegment(fs, cur);
if ((sc->fs | 0x03) != cur)
loadsegment(fs, sc->fs | 0x03);
if (fixup_rpl(sc->fs) != cur)
loadsegment(fs, fixup_rpl(sc->fs));
savesegment(ds, cur);
if ((sc->ds | 0x03) != cur)
loadsegment(ds, sc->ds | 0x03);
if (fixup_rpl(sc->ds) != cur)
loadsegment(ds, fixup_rpl(sc->ds));
savesegment(es, cur);
if ((sc->es | 0x03) != cur)
loadsegment(es, sc->es | 0x03);
if (fixup_rpl(sc->es) != cur)
loadsegment(es, fixup_rpl(sc->es));
}
#define sigset32_t compat_sigset_t
@@ -105,18 +135,12 @@ static bool ia32_restore_sigcontext(struct pt_regs *regs,
regs->orig_ax = -1;
#ifdef CONFIG_IA32_EMULATION
/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
reload_segments(&sc);
#else
loadsegment(gs, sc.gs);
regs->fs = sc.fs;
regs->es = sc.es;
regs->ds = sc.ds;
loadsegment(gs, fixup_rpl(sc.gs));
regs->fs = fixup_rpl(sc.fs);
regs->es = fixup_rpl(sc.es);
regs->ds = fixup_rpl(sc.ds);
#endif
return fpu__restore_sig(compat_ptr(sc.fpstate), 1);

View File

@@ -2422,7 +2422,7 @@ static int __set_pages_np(struct page *page, int numpages)
.pgd = NULL,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
.flags = CPA_NO_CHECK_ALIAS };
/*
@@ -2501,7 +2501,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW|_PAGE_DIRTY)),
.flags = CPA_NO_CHECK_ALIAS,
};
@@ -2544,7 +2544,7 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
.flags = CPA_NO_CHECK_ALIAS,
};

View File

@@ -88,6 +88,7 @@ struct ata_force_param {
unsigned int xfer_mask;
unsigned int quirk_on;
unsigned int quirk_off;
unsigned int pflags_on;
u16 lflags_on;
u16 lflags_off;
};
@@ -331,6 +332,35 @@ void ata_force_cbl(struct ata_port *ap)
}
}
/**
* ata_force_pflags - force port flags according to libata.force
* @ap: ATA port of interest
*
* Force port flags according to libata.force and whine about it.
*
* LOCKING:
* EH context.
*/
static void ata_force_pflags(struct ata_port *ap)
{
	int i;

	/* Walk the table backwards, matching the other ata_force_* helpers */
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		/* -1 matches any port; otherwise require an exact port match */
		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		/* let pflags stack */
		if (fe->param.pflags_on) {
			ap->pflags |= fe->param.pflags_on;
			ata_port_notice(ap,
					"FORCE: port flag 0x%x forced -> 0x%x\n",
					fe->param.pflags_on, ap->pflags);
		}
	}
}
/**
* ata_force_link_limits - force link limits according to libata.force
* @link: ATA link of interest
@@ -486,6 +516,7 @@ static void ata_force_quirks(struct ata_device *dev)
}
}
#else
static inline void ata_force_pflags(struct ata_port *ap) { }
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_quirks(struct ata_device *dev) { }
@@ -5460,6 +5491,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
#endif
ata_sff_port_init(ap);
ata_force_pflags(ap);
return ap;
}
EXPORT_SYMBOL_GPL(ata_port_alloc);
@@ -6272,6 +6305,9 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one);
{ "no" #name, .lflags_on = (flags) }, \
{ #name, .lflags_off = (flags) }
#define force_pflag_on(name, flags) \
{ #name, .pflags_on = (flags) }
#define force_quirk_on(name, flag) \
{ #name, .quirk_on = (flag) }
@@ -6331,6 +6367,8 @@ static const struct ata_force_param force_tbl[] __initconst = {
force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
force_pflag_on(external, ATA_PFLAG_EXTERNAL),
force_quirk_onoff(ncq, ATA_QUIRK_NONCQ),
force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM),
force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI),

View File

@@ -1205,6 +1205,20 @@ config HID_U2FZERO
allow setting the brightness to anything but 1, which will
trigger a single blink and immediately reset back to 0.
config HID_UNIVERSAL_PIDFF
tristate "universal-pidff: extended USB PID driver compatibility and usage"
depends on USB_HID
depends on HID_PID
help
Extended PID support for selected devices.
Contains report fixups, extended usable button range and
pidff quirk management to extend compatibility with slightly
non-compliant USB PID devices and better fuzz/flat values for
high precision direct drive devices.
Supports Moza Racing, Cammus, VRS, FFBeast and more.
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_HID

View File

@@ -139,6 +139,7 @@ hid-uclogic-objs := hid-uclogic-core.o \
hid-uclogic-params.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
obj-$(CONFIG_HID_UDRAW_PS3) += hid-udraw-ps3.o
obj-$(CONFIG_HID_UNIVERSAL_PIDFF) += hid-universal-pidff.o
obj-$(CONFIG_HID_LED) += hid-led.o
obj-$(CONFIG_HID_XIAOMI) += hid-xiaomi.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o

View File

@@ -262,6 +262,10 @@
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
#define USB_VENDOR_ID_CAMMUS 0x3416
#define USB_DEVICE_ID_CAMMUS_C5 0x0301
#define USB_DEVICE_ID_CAMMUS_C12 0x0302
#define USB_VENDOR_ID_CANDO 0x2087
#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
@@ -453,6 +457,11 @@
#define USB_VENDOR_ID_EVISION 0x320f
#define USB_DEVICE_ID_EVISION_ICL01 0x5041
#define USB_VENDOR_ID_FFBEAST 0x045b
#define USB_DEVICE_ID_FFBEAST_JOYSTICK 0x58f9
#define USB_DEVICE_ID_FFBEAST_RUDDER 0x5968
#define USB_DEVICE_ID_FFBEAST_WHEEL 0x59d7
#define USB_VENDOR_ID_FLATFROG 0x25b5
#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002
@@ -813,6 +822,13 @@
#define I2C_DEVICE_ID_LG_8001 0x8001
#define I2C_DEVICE_ID_LG_7010 0x7010
#define USB_VENDOR_ID_LITE_STAR 0x11ff
#define USB_DEVICE_ID_PXN_V10 0x3245
#define USB_DEVICE_ID_PXN_V12 0x1212
#define USB_DEVICE_ID_PXN_V12_LITE 0x1112
#define USB_DEVICE_ID_PXN_V12_LITE_2 0x1211
#define USB_DEVICE_LITE_STAR_GT987_FF 0x2141
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_Z_10_SPK 0x0a07
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -960,6 +976,18 @@
#define USB_VENDOR_ID_MONTEREY 0x0566
#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
#define USB_VENDOR_ID_MOZA 0x346e
#define USB_DEVICE_ID_MOZA_R3 0x0005
#define USB_DEVICE_ID_MOZA_R3_2 0x0015
#define USB_DEVICE_ID_MOZA_R5 0x0004
#define USB_DEVICE_ID_MOZA_R5_2 0x0014
#define USB_DEVICE_ID_MOZA_R9 0x0002
#define USB_DEVICE_ID_MOZA_R9_2 0x0012
#define USB_DEVICE_ID_MOZA_R12 0x0006
#define USB_DEVICE_ID_MOZA_R12_2 0x0016
#define USB_DEVICE_ID_MOZA_R16_R21 0x0000
#define USB_DEVICE_ID_MOZA_R16_R21_2 0x0010
#define USB_VENDOR_ID_MSI 0x1770
#define USB_DEVICE_ID_MSI_GT683R_LED_PANEL 0xff00
@@ -1371,6 +1399,9 @@
#define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061
#define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068
#define USB_VENDOR_ID_VRS 0x0483
#define USB_DEVICE_ID_VRS_DFP 0xa355
#define USB_VENDOR_ID_VTL 0x0306
#define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f

View File

@@ -0,0 +1,197 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* HID UNIVERSAL PIDFF
* hid-pidff wrapper for PID-enabled devices
* Handles device reports, quirks and extends usable button range
*
* Copyright (c) 2024, 2025 Makarenko Oleg
* Copyright (c) 2024, 2025 Tomasz Pakuła
*/
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/input-event-codes.h>
#include "hid-ids.h"
#define JOY_RANGE (BTN_DEAD - BTN_JOYSTICK + 1)
/*
* Map buttons manually to extend the default joystick button limit
*/
/*
 * Map buttons manually to extend the default joystick button limit.
 * Buttons beyond BTN_DEAD continue in the KEY_NEXT_FAVORITE range;
 * anything past KEY_MAX becomes KEY_RESERVED so it still produces
 * MSC_SCAN events.
 */
static int universal_pidff_input_mapping(struct hid_device *hdev,
	struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
	unsigned long **bit, int *max)
{
	int btn_index, keycode;

	/* Only remap button-page usages on joystick applications */
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
		return 0;

	if (field->application != HID_GD_JOYSTICK)
		return 0;

	btn_index = (usage->hid - 1) & HID_USAGE;
	keycode = btn_index + BTN_JOYSTICK;

	/* Detect the end of the JOYSTICK button range */
	if (keycode > BTN_DEAD)
		keycode = btn_index + KEY_NEXT_FAVORITE - JOY_RANGE;

	/*
	 * Map overflowing buttons to KEY_RESERVED to not ignore
	 * them and let them still trigger MSC_SCAN
	 */
	if (keycode > KEY_MAX)
		keycode = KEY_RESERVED;

	hid_map_usage(hi, usage, bit, max, EV_KEY, keycode);
	hid_dbg(hdev, "Button %d: usage %d", btn_index, keycode);

	return 1;
}
/*
* Check if the device is PID and initialize it
* Add quirks after initialisation
*/
/*
 * Check if the device exposes the PID usage page and initialize
 * force feedback for it, applying the quirks carried in the match
 * table entry's driver_data.
 *
 * Returns 0 on success — including the benign case where no PID
 * usage page is present (secondary interfaces that only add
 * buttons/axes) — or a negative errno on failure.
 */
static int universal_pidff_probe(struct hid_device *hdev,
				 const struct hid_device_id *id)
{
	int i, error;

	error = hid_parse(hdev);
	if (error) {
		hid_err(hdev, "HID parse failed\n");
		goto err;
	}

	/* Connect without FF; pidff init below wires up force feedback */
	error = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (error) {
		hid_err(hdev, "HID hw start failed\n");
		goto err;
	}

	/* Check if device contains PID usage page */
	error = 1;
	for (i = 0; i < hdev->collection_size; i++)
		if ((hdev->collection[i].usage & HID_USAGE_PAGE) == HID_UP_PID) {
			error = 0;
			hid_dbg(hdev, "PID usage page found\n");
			break;
		}

	/*
	 * Do not fail as this might be the second "device"
	 * just for additional buttons/axes. Exit cleanly if force
	 * feedback usage page wasn't found (included devices were
	 * tested and confirmed to be USB PID after all).
	 */
	if (error) {
		hid_dbg(hdev, "PID usage page not found in the descriptor\n");
		return 0;
	}

	/* Check if HID_PID support is enabled */
	int (*init_function)(struct hid_device *, __u32);

	init_function = hid_pidff_init_with_quirks;
	if (!init_function) {
		hid_warn(hdev, "HID_PID support not enabled!\n");
		return 0;
	}

	error = init_function(hdev, id->driver_data);
	if (error) {
		hid_warn(hdev, "Error initialising force feedback\n");
		goto err;
	}

	/* Spelling fixed and newline added to keep the log line terminated */
	hid_info(hdev, "Universal pidff driver loaded successfully!\n");

	return 0;

err:
	return error;
}
/*
 * Tune absolute-axis parameters after input setup: tighten fuzz on
 * every reported axis (0 on ABS_X, 8 elsewhere) and clear fuzz/flat
 * entirely on the FFBeast joystick's second axis.
 */
static int universal_pidff_input_configured(struct hid_device *hdev,
	struct hid_input *hidinput)
{
	struct input_dev *input = hidinput->input;
	int axis;

	/* Nothing to tune when the device reports no absolute axes */
	if (!input->absinfo)
		return 0;

	/* Decrease fuzz and deadzone on available axes */
	for (axis = ABS_X; axis <= ABS_BRAKE; axis++) {
		if (!test_bit(axis, input->absbit))
			continue;

		input_set_abs_params(input, axis,
			input->absinfo[axis].minimum,
			input->absinfo[axis].maximum,
			axis == ABS_X ? 0 : 8, 0);
	}

	/* Remove fuzz and deadzone from the second joystick axis */
	if (hdev->vendor == USB_VENDOR_ID_FFBEAST &&
	    hdev->product == USB_DEVICE_ID_FFBEAST_JOYSTICK)
		input_set_abs_params(input, ABS_Y,
			input->absinfo[ABS_Y].minimum,
			input->absinfo[ABS_Y].maximum, 0, 0);

	return 0;
}
/*
 * Supported devices; driver_data carries the HID_PIDFF_QUIRK_* mask
 * passed to hid_pidff_init_with_quirks() at probe time.
 */
static const struct hid_device_id universal_pidff_devices[] = {
	/* Moza Racing wheelbases: all need the fixed wheel direction */
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3_2),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5_2),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9_2),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12_2),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21_2),
.driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
	/* Cammus wheelbases: fully compliant, no quirks needed */
{ HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C12) },
	/* VRS pedals: tolerate a non-strict Device Control field */
{ HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_DFP),
.driver_data = HID_PIDFF_QUIRK_PERMISSIVE_CONTROL },
	/* FFBeast direct-drive devices */
{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_JOYSTICK), },
{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_RUDDER), },
{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_WHEEL) },
	/* Lite Star / PXN wheels: only the sine periodic effect works */
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V10),
.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12),
.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE),
.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE_2),
.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_LITE_STAR_GT987_FF),
.driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ }
};
MODULE_DEVICE_TABLE(hid, universal_pidff_devices);
/* HID driver glue: custom mapping, probe and axis tuning hooks */
static struct hid_driver universal_pidff = {
.name = "hid-universal-pidff",
.id_table = universal_pidff_devices,
.input_mapping = universal_pidff_input_mapping,
.probe = universal_pidff_probe,
.input_configured = universal_pidff_input_configured
};
module_hid_driver(universal_pidff);
MODULE_DESCRIPTION("Universal driver for USB PID Force Feedback devices");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Makarenko Oleg <oleg@makarenk.ooo>");
MODULE_AUTHOR("Tomasz Pakuła <tomasz.pakula.oficjalny@gmail.com>");

View File

@@ -21,6 +21,7 @@
#include "usbhid.h"
#define PID_EFFECTS_MAX 64
#define PID_INFINITE 0xffff
/* Report usage table used to put reports into an array */
@@ -136,6 +137,9 @@ static const u8 pidff_block_load_status[] = { 0x8c, 0x8d };
#define PID_EFFECT_STOP 1
static const u8 pidff_effect_operation_status[] = { 0x79, 0x7b };
/* Polar direction 90 degrees (North) */
#define PIDFF_FIXED_WHEEL_DIRECTION 0x4000
struct pidff_usage {
struct hid_field *field;
s32 *value;
@@ -184,6 +188,8 @@ struct pidff_device {
int operation_id[sizeof(pidff_effect_operation_status)];
int pid_id[PID_EFFECTS_MAX];
u32 quirks;
};
/*
@@ -261,10 +267,22 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
static int pidff_needs_set_envelope(struct ff_envelope *envelope,
struct ff_envelope *old)
{
return envelope->attack_level != old->attack_level ||
envelope->fade_level != old->fade_level ||
bool needs_new_envelope;
needs_new_envelope = envelope->attack_level != 0 ||
envelope->fade_level != 0 ||
envelope->attack_length != 0 ||
envelope->fade_length != 0;
if (!needs_new_envelope)
return false;
if (!old)
return needs_new_envelope;
return envelope->attack_level != old->attack_level ||
envelope->fade_level != old->fade_level ||
envelope->attack_length != old->attack_length ||
envelope->fade_length != old->fade_length;
envelope->fade_length != old->fade_length;
}
/*
@@ -301,17 +319,28 @@ static void pidff_set_effect_report(struct pidff_device *pidff,
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
pidff->set_effect_type->value[0] =
pidff->create_new_effect_type->value[0];
pidff->set_effect[PID_DURATION].value[0] = effect->replay.length;
/* Convert infinite length from Linux API (0)
to PID standard (NULL) if needed */
pidff->set_effect[PID_DURATION].value[0] =
effect->replay.length == 0 ? PID_INFINITE : effect->replay.length;
pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button;
pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] =
effect->trigger.interval;
pidff->set_effect[PID_GAIN].value[0] =
pidff->set_effect[PID_GAIN].field->logical_maximum;
pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
pidff->effect_direction->value[0] =
pidff_rescale(effect->direction, 0xffff,
pidff->effect_direction);
pidff->set_effect[PID_START_DELAY].value[0] = effect->replay.delay;
/* Use fixed direction if needed */
pidff->effect_direction->value[0] = pidff_rescale(
pidff->quirks & HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION ?
PIDFF_FIXED_WHEEL_DIRECTION : effect->direction,
0xffff, pidff->effect_direction);
/* Omit setting delay field if it's missing */
if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
pidff->set_effect[PID_START_DELAY].value[0] = effect->replay.delay;
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
HID_REQ_SET_REPORT);
@@ -368,13 +397,19 @@ static int pidff_needs_set_periodic(struct ff_effect *effect,
static void pidff_set_condition_report(struct pidff_device *pidff,
struct ff_effect *effect)
{
int i;
int i, max_axis;
/* Devices missing Parameter Block Offset can only have one axis */
max_axis = pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO ? 1 : 2;
pidff->set_condition[PID_EFFECT_BLOCK_INDEX].value[0] =
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
for (i = 0; i < 2; i++) {
pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i;
for (i = 0; i < max_axis; i++) {
/* Omit Parameter Block Offset if missing */
if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO))
pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i;
pidff_set_signed(&pidff->set_condition[PID_CP_OFFSET],
effect->u.condition[i].center);
pidff_set_signed(&pidff->set_condition[PID_POS_COEFFICIENT],
@@ -574,11 +609,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_constant(effect, old))
pidff_set_constant_force_report(pidff, effect);
if (!old ||
pidff_needs_set_envelope(&effect->u.constant.envelope,
&old->u.constant.envelope))
pidff_set_envelope_report(pidff,
&effect->u.constant.envelope);
if (pidff_needs_set_envelope(&effect->u.constant.envelope,
old ? &old->u.constant.envelope : NULL))
pidff_set_envelope_report(pidff, &effect->u.constant.envelope);
break;
case FF_PERIODIC:
@@ -604,6 +637,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
return -EINVAL;
}
if (pidff->quirks & HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY)
type_id = PID_SINE;
error = pidff_request_effect_upload(pidff,
pidff->type_id[type_id]);
if (error)
@@ -613,11 +649,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_periodic(effect, old))
pidff_set_periodic_report(pidff, effect);
if (!old ||
pidff_needs_set_envelope(&effect->u.periodic.envelope,
&old->u.periodic.envelope))
pidff_set_envelope_report(pidff,
&effect->u.periodic.envelope);
if (pidff_needs_set_envelope(&effect->u.periodic.envelope,
old ? &old->u.periodic.envelope : NULL))
pidff_set_envelope_report(pidff, &effect->u.periodic.envelope);
break;
case FF_RAMP:
@@ -631,11 +665,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_ramp(effect, old))
pidff_set_ramp_force_report(pidff, effect);
if (!old ||
pidff_needs_set_envelope(&effect->u.ramp.envelope,
&old->u.ramp.envelope))
pidff_set_envelope_report(pidff,
&effect->u.ramp.envelope);
if (pidff_needs_set_envelope(&effect->u.ramp.envelope,
old ? &old->u.ramp.envelope : NULL))
pidff_set_envelope_report(pidff, &effect->u.ramp.envelope);
break;
case FF_SPRING:
@@ -736,7 +768,10 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = 0;
pidff_set(&pidff->set_effect[PID_GAIN], magnitude);
pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
pidff->set_effect[PID_START_DELAY].value[0] = 0;
/* Omit setting delay field if it's missing */
if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
pidff->set_effect[PID_START_DELAY].value[0] = 0;
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
HID_REQ_SET_REPORT);
@@ -758,7 +793,13 @@ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
struct hid_report *report, int count, int strict)
{
if (!report) {
pr_debug("pidff_find_fields, null report\n");
return -1;
}
int i, j, k, found;
int return_value = 0;
for (k = 0; k < count; k++) {
found = 0;
@@ -783,12 +824,22 @@ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
if (found)
break;
}
if (!found && strict) {
if (!found && table[k] == pidff_set_effect[PID_START_DELAY]) {
pr_debug("Delay field not found, but that's OK\n");
pr_debug("Setting MISSING_DELAY quirk\n");
return_value |= HID_PIDFF_QUIRK_MISSING_DELAY;
}
else if (!found && table[k] == pidff_set_condition[PID_PARAM_BLOCK_OFFSET]) {
pr_debug("PBO field not found, but that's OK\n");
pr_debug("Setting MISSING_PBO quirk\n");
return_value |= HID_PIDFF_QUIRK_MISSING_PBO;
}
else if (!found && strict) {
pr_debug("failed to locate %d\n", k);
return -1;
}
}
return 0;
return return_value;
}
/*
@@ -871,6 +922,11 @@ static int pidff_reports_ok(struct pidff_device *pidff)
static struct hid_field *pidff_find_special_field(struct hid_report *report,
int usage, int enforce_min)
{
if (!report) {
pr_debug("pidff_find_special_field, null report\n");
return NULL;
}
int i;
for (i = 0; i < report->maxfield; i++) {
@@ -932,7 +988,8 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
0x57, 0);
pidff->device_control =
pidff_find_special_field(pidff->reports[PID_DEVICE_CONTROL],
0x96, 1);
0x96, !(pidff->quirks & HID_PIDFF_QUIRK_PERMISSIVE_CONTROL));
pidff->block_load_status =
pidff_find_special_field(pidff->reports[PID_BLOCK_LOAD],
0x8b, 1);
@@ -1062,12 +1119,19 @@ static int pidff_find_effects(struct pidff_device *pidff,
*/
static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
{
int envelope_ok = 0;
int status = 0;
if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) {
/* Save info about the device not having the DELAY ffb field. */
status = PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1);
if (status == -1) {
hid_err(pidff->hid, "unknown set_effect report layout\n");
return -ENODEV;
}
pidff->quirks |= status;
if (status & HID_PIDFF_QUIRK_MISSING_DELAY)
hid_dbg(pidff->hid, "Adding MISSING_DELAY quirk\n");
PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0);
if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) {
@@ -1085,13 +1149,10 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
return -ENODEV;
}
if (!PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1))
envelope_ok = 1;
if (pidff_find_special_fields(pidff) || pidff_find_effects(pidff, dev))
return -ENODEV;
if (!envelope_ok) {
if (PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1)) {
if (test_and_clear_bit(FF_CONSTANT, dev->ffbit))
hid_warn(pidff->hid,
"has constant effect but no envelope\n");
@@ -1116,16 +1177,20 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
clear_bit(FF_RAMP, dev->ffbit);
}
if ((test_bit(FF_SPRING, dev->ffbit) ||
test_bit(FF_DAMPER, dev->ffbit) ||
test_bit(FF_FRICTION, dev->ffbit) ||
test_bit(FF_INERTIA, dev->ffbit)) &&
PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) {
hid_warn(pidff->hid, "unknown condition effect layout\n");
clear_bit(FF_SPRING, dev->ffbit);
clear_bit(FF_DAMPER, dev->ffbit);
clear_bit(FF_FRICTION, dev->ffbit);
clear_bit(FF_INERTIA, dev->ffbit);
if (test_bit(FF_SPRING, dev->ffbit) ||
test_bit(FF_DAMPER, dev->ffbit) ||
test_bit(FF_FRICTION, dev->ffbit) ||
test_bit(FF_INERTIA, dev->ffbit)) {
status = PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1);
if (status < 0) {
hid_warn(pidff->hid, "unknown condition effect layout\n");
clear_bit(FF_SPRING, dev->ffbit);
clear_bit(FF_DAMPER, dev->ffbit);
clear_bit(FF_FRICTION, dev->ffbit);
clear_bit(FF_INERTIA, dev->ffbit);
}
pidff->quirks |= status;
}
if (test_bit(FF_PERIODIC, dev->ffbit) &&
@@ -1222,8 +1287,9 @@ static int pidff_check_autocenter(struct pidff_device *pidff,
/*
* Check if the device is PID and initialize it
* Set initial quirks
*/
int hid_pidff_init(struct hid_device *hid)
int hid_pidff_init_with_quirks(struct hid_device *hid, __u32 initial_quirks)
{
struct pidff_device *pidff;
struct hid_input *hidinput = list_entry(hid->inputs.next,
@@ -1245,6 +1311,7 @@ int hid_pidff_init(struct hid_device *hid)
return -ENOMEM;
pidff->hid = hid;
pidff->quirks = initial_quirks;
hid_device_io_start(hid);
@@ -1311,6 +1378,7 @@ int hid_pidff_init(struct hid_device *hid)
ff->playback = pidff_playback;
hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
hid_dbg(dev, "Active quirks mask: 0x%x\n", pidff->quirks);
hid_device_io_stop(hid);
@@ -1322,3 +1390,14 @@ int hid_pidff_init(struct hid_device *hid)
kfree(pidff);
return error;
}
EXPORT_SYMBOL_GPL(hid_pidff_init_with_quirks);
/*
* Check if the device is PID and initialize it
* Wrapper made to keep the compatibility with old
* init function
*/
int hid_pidff_init(struct hid_device *hid)
{
return hid_pidff_init_with_quirks(hid, 0);
}

View File

@@ -832,7 +832,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
if (&data->domain->domain != &exynos_identity_domain) {
if (data->domain) {
dev_dbg(data->sysmmu, "saving state\n");
__sysmmu_disable(data);
}
@@ -850,7 +850,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
mutex_lock(&owner->rpm_lock);
if (&data->domain->domain != &exynos_identity_domain) {
if (data->domain) {
dev_dbg(data->sysmmu, "restoring state\n");
__sysmmu_enable(data);
}

View File

@@ -1371,15 +1371,6 @@ static int mtk_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
mutex_init(&data->mutex);
ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
"mtk-iommu.%pa", &ioaddr);
if (ret)
goto out_link_remove;
ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
if (ret)
goto out_sysfs_remove;
if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) {
list_add_tail(&data->list, data->plat_data->hw_list);
data->hw_list = data->plat_data->hw_list;
@@ -1389,19 +1380,28 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->hw_list = &data->hw_list_head;
}
ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
"mtk-iommu.%pa", &ioaddr);
if (ret)
goto out_list_del;
ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev);
if (ret)
goto out_sysfs_remove;
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
if (ret)
goto out_list_del;
goto out_device_unregister;
}
return ret;
out_list_del:
list_del(&data->list);
out_device_unregister:
iommu_device_unregister(&data->iommu);
out_sysfs_remove:
iommu_device_sysfs_remove(&data->iommu);
out_link_remove:
out_list_del:
list_del(&data->list);
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
device_link_remove(data->smicomm_dev, dev);
out_runtime_disable:

View File

@@ -549,8 +549,9 @@ static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx)
case V4L2_PIX_FMT_NV21M:
ctx->stride[0] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
ctx->stride[1] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
ctx->luma_size = ctx->stride[0] * ALIGN(ctx->img_height, 16);
ctx->chroma_size = ctx->stride[0] * ALIGN(ctx->img_height / 2, 16);
ctx->luma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height, 16), 256);
ctx->chroma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height / 2, 16),
256);
break;
case V4L2_PIX_FMT_YUV420M:
case V4L2_PIX_FMT_YVU420M:

View File

@@ -3039,6 +3039,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_IGNORE_SELECTOR_UNIT) },
/* Actions Microelectronics Co. Display capture-UVC05 */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x1de1,
.idProduct = 0xf105,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) },
/* NXP Semiconductors IR VIDEO */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,

View File

@@ -2579,6 +2579,91 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
}
}
static void dw_mci_push_data64_32(struct dw_mci *host, void *buf, int cnt)
{
struct mmc_data *data = host->data;
int init_cnt = cnt;
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
buf += len;
cnt -= len;
if (host->part_buf_count == 8) {
mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
host->part_buf_count = 0;
}
}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x7)) {
while (cnt >= 8) {
u64 aligned_buf[16];
int len = min(cnt & -8, (int)sizeof(aligned_buf));
int items = len >> 3;
int i;
/* memcpy from input buffer into aligned buffer */
memcpy(aligned_buf, buf, len);
buf += len;
cnt -= len;
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
mci_fifo_l_writeq(host->fifo_reg, aligned_buf[i]);
}
} else
#endif
{
u64 *pdata = buf;
for (; cnt >= 8; cnt -= 8)
mci_fifo_l_writeq(host->fifo_reg, *pdata++);
buf = pdata;
}
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
/* Push data if we have reached the expected data length */
if ((data->bytes_xfered + init_cnt) ==
(data->blksz * data->blocks))
mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
}
}
static void dw_mci_pull_data64_32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x7)) {
while (cnt >= 8) {
/* pull data from fifo into aligned buffer */
u64 aligned_buf[16];
int len = min(cnt & -8, (int)sizeof(aligned_buf));
int items = len >> 3;
int i;
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_l_readq(host->fifo_reg);
/* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
buf += len;
cnt -= len;
}
} else
#endif
{
u64 *pdata = buf;
for (; cnt >= 8; cnt -= 8)
*pdata++ = mci_fifo_l_readq(host->fifo_reg);
buf = pdata;
}
if (cnt) {
host->part_buf = mci_fifo_l_readq(host->fifo_reg);
dw_mci_pull_final_bytes(host, buf, cnt);
}
}
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
int len;
@@ -3379,8 +3464,13 @@ int dw_mci_probe(struct dw_mci *host)
width = 16;
host->data_shift = 1;
} else if (i == 2) {
host->push_data = dw_mci_push_data64;
host->pull_data = dw_mci_pull_data64;
if ((host->quirks & DW_MMC_QUIRK_FIFO64_32)) {
host->push_data = dw_mci_push_data64_32;
host->pull_data = dw_mci_pull_data64_32;
} else {
host->push_data = dw_mci_push_data64;
host->pull_data = dw_mci_pull_data64;
}
width = 64;
host->data_shift = 3;
} else {

View File

@@ -281,6 +281,8 @@ struct dw_mci_board {
/* Support for longer data read timeout */
#define DW_MMC_QUIRK_EXTENDED_TMOUT BIT(0)
/* Force 32-bit access to the FIFO */
#define DW_MMC_QUIRK_FIFO64_32 BIT(1)
#define DW_MMC_240A 0x240a
#define DW_MMC_280A 0x280a
@@ -472,6 +474,31 @@ struct dw_mci_board {
#define mci_fifo_writel(__value, __reg) __raw_writel(__reg, __value)
#define mci_fifo_writeq(__value, __reg) __raw_writeq(__reg, __value)
/*
* Some dw_mmc devices have 64-bit FIFOs, but expect them to be
* accessed using two 32-bit accesses. If such controller is used
* with a 64-bit kernel, this has to be done explicitly.
*/
static inline u64 mci_fifo_l_readq(void __iomem *addr)
{
u64 ans;
u32 proxy[2];
proxy[0] = mci_fifo_readl(addr);
proxy[1] = mci_fifo_readl(addr + 4);
memcpy(&ans, proxy, 8);
return ans;
}
static inline void mci_fifo_l_writeq(void __iomem *addr, u64 value)
{
u32 proxy[2];
memcpy(proxy, &value, 8);
mci_fifo_writel(addr, proxy[0]);
mci_fifo_writel(addr + 4, proxy[1]);
}
/* Register access macros */
#define mci_readl(dev, reg) \
readl_relaxed((dev)->regs + SDMMC_##reg)

View File

@@ -386,6 +386,16 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data nxp_s32g2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 |
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
FLEXCAN_QUIRK_SECONDARY_MB_IRQ,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
@@ -1762,14 +1772,25 @@ static int flexcan_open(struct net_device *dev)
goto out_free_irq_boff;
}
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
err = request_irq(priv->irq_secondary_mb,
flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
goto out_free_irq_err;
}
flexcan_chip_interrupts_enable(dev);
netif_start_queue(dev);
return 0;
out_free_irq_err:
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
free_irq(priv->irq_err, dev);
out_free_irq_boff:
free_irq(priv->irq_boff, dev);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
free_irq(priv->irq_boff, dev);
out_free_irq:
free_irq(dev->irq, dev);
out_can_rx_offload_disable:
@@ -1794,6 +1815,9 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ)
free_irq(priv->irq_secondary_mb, dev);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
free_irq(priv->irq_err, dev);
free_irq(priv->irq_boff, dev);
@@ -2041,6 +2065,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
{ .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
{ .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -2187,6 +2212,14 @@ static int flexcan_probe(struct platform_device *pdev)
}
}
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1");
if (priv->irq_secondary_mb < 0) {
err = priv->irq_secondary_mb;
goto failed_platform_get_irq;
}
}
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;

View File

@@ -70,6 +70,10 @@
#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
/* Setup stop mode with ATF SCMI protocol to support wakeup */
#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
/* Device has two separate interrupt lines for two mailbox ranges, which
* both need to have an interrupt handler registered.
*/
#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ BIT(18)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
@@ -107,6 +111,7 @@ struct flexcan_priv {
int irq_boff;
int irq_err;
int irq_secondary_mb;
/* IPC handle when setup stop mode by System Controller firmware(scfw) */
struct imx_sc_ipc *sc_ipc_handle;

View File

@@ -506,6 +506,11 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
unsigned char *data;
int islcp;
/* Ensure we can safely access protocol field and LCP code */
if (!pskb_may_pull(skb, 3)) {
kfree_skb(skb);
return NULL;
}
data = skb->data;
proto = get_unaligned_be16(data);

View File

@@ -1421,6 +1421,19 @@ static const struct driver_info hg20f9_info = {
.data = FLAG_EEPROM_MAC,
};
static const struct driver_info lyconsys_fibergecko100_info = {
.description = "LyconSys FiberGecko 100 USB 2.0 to SFP Adapter",
.bind = ax88178_bind,
.status = asix_status,
.link_reset = ax88178_link_reset,
.reset = ax88178_link_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
.data = 0x20061201,
};
static const struct usb_device_id products [] = {
{
// Linksys USB200M
@@ -1578,6 +1591,10 @@ static const struct usb_device_id products [] = {
// Linux Automation GmbH USB 10Base-T1L
USB_DEVICE(0x33f7, 0x0004),
.driver_info = (unsigned long) &lxausb_t1l_info,
}, {
/* LyconSys FiberGecko 100 */
USB_DEVICE(0x1d2a, 0x0801),
.driver_info = (unsigned long) &lyconsys_fibergecko100_info,
},
{ }, // END
};

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1290,6 +1290,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
ath11k_core_deinit(ab);
qmi_fail:
ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}
@@ -1309,6 +1310,7 @@ static void ath11k_ahb_shutdown(struct platform_device *pdev)
ath11k_core_deinit(ab);
free_resources:
ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -2214,7 +2214,6 @@ void ath11k_core_deinit(struct ath11k_base *ab)
ath11k_hif_power_down(ab);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
ath11k_fw_destroy(ab);
}
EXPORT_SYMBOL(ath11k_core_deinit);

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -104,14 +104,12 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
if (ring->cached) {
dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
DMA_FROM_DEVICE);
kfree(ring->vaddr_unaligned);
} else {
if (ring->cached)
dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned, DMA_FROM_DEVICE);
else
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
}
ring->vaddr_unaligned = NULL;
}
@@ -249,25 +247,14 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
default:
cached = false;
}
if (cached) {
ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
if (!ring->vaddr_unaligned)
return -ENOMEM;
ring->paddr_unaligned = dma_map_single(ab->dev,
ring->vaddr_unaligned,
ring->size,
DMA_FROM_DEVICE);
if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
kfree(ring->vaddr_unaligned);
ring->vaddr_unaligned = NULL;
return -ENOMEM;
}
}
}
if (!cached)
if (cached)
ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
&ring->paddr_unaligned,
DMA_FROM_DEVICE,
GFP_KERNEL);
else
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -166,3 +166,4 @@ void ath11k_fw_destroy(struct ath11k_base *ab)
{
release_firmware(ab->fw.fw);
}
EXPORT_SYMBOL(ath11k_fw_destroy);

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -981,6 +981,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
ath11k_core_deinit(ab);
qmi_fail:
ath11k_fw_destroy(ab);
ath11k_mhi_unregister(ab_pci);
ath11k_pcic_free_irq(ab);

View File

@@ -2533,7 +2533,7 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
dest_idx = 0;
move_next:
ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
ath12k_hal_srng_src_get_next_entry(ab, srng);
ath12k_hal_srng_dst_get_next_entry(ab, srng);
num_buffs_reaped++;
}

View File

@@ -1541,6 +1541,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
ath12k_core_deinit(ab);
qmi_fail:
ath12k_fw_unmap(ab);
ath12k_mhi_unregister(ab_pci);
ath12k_pci_free_irq(ab);

View File

@@ -342,12 +342,10 @@ armpmu_add(struct perf_event *event, int flags)
if (idx < 0)
return idx;
/*
* If there is an event in the counter we are going to use then make
* sure it is disabled.
*/
/* The newly-allocated counter should be empty */
WARN_ON_ONCE(hw_events->events[idx]);
event->hw.idx = idx;
armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

View File

@@ -567,8 +567,10 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev)
return PTR_ERR(plat_dev);
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info)
if (!dev_info) {
platform_device_unregister(plat_dev);
return -ENOMEM;
}
/* Cache platform device to handle pci device hotplug */
dev_info->plat_dev = plat_dev;
@@ -724,6 +726,15 @@ static struct platform_driver dwc_pcie_pmu_driver = {
.driver = {.name = "dwc_pcie_pmu",},
};
static void dwc_pcie_cleanup_devices(void)
{
struct dwc_pcie_dev_info *dev_info, *tmp;
list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) {
dwc_pcie_unregister_dev(dev_info);
}
}
static int __init dwc_pcie_pmu_init(void)
{
struct pci_dev *pdev = NULL;
@@ -736,7 +747,7 @@ static int __init dwc_pcie_pmu_init(void)
ret = dwc_pcie_register_dev(pdev);
if (ret) {
pci_dev_put(pdev);
return ret;
goto err_cleanup;
}
}
@@ -745,35 +756,35 @@ static int __init dwc_pcie_pmu_init(void)
dwc_pcie_pmu_online_cpu,
dwc_pcie_pmu_offline_cpu);
if (ret < 0)
return ret;
goto err_cleanup;
dwc_pcie_pmu_hp_state = ret;
ret = platform_driver_register(&dwc_pcie_pmu_driver);
if (ret)
goto platform_driver_register_err;
goto err_remove_cpuhp;
ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
if (ret)
goto platform_driver_register_err;
goto err_unregister_driver;
notify = true;
return 0;
platform_driver_register_err:
err_unregister_driver:
platform_driver_unregister(&dwc_pcie_pmu_driver);
err_remove_cpuhp:
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
err_cleanup:
dwc_pcie_cleanup_devices();
return ret;
}
static void __exit dwc_pcie_pmu_exit(void)
{
struct dwc_pcie_dev_info *dev_info, *tmp;
if (notify)
bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
dwc_pcie_unregister_dev(dev_info);
dwc_pcie_cleanup_devices();
platform_driver_unregister(&dwc_pcie_pmu_driver);
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
}

View File

@@ -30,6 +30,7 @@
#define DRV_NAME "cros_ec_lpcs"
#define ACPI_DRV_NAME "GOOG0004"
#define FRMW_ACPI_DRV_NAME "FRMWC004"
/* True if ACPI device is present */
static bool cros_ec_lpc_acpi_device_found;
@@ -460,7 +461,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
acpi_status status;
struct cros_ec_device *ec_dev;
struct cros_ec_lpc *ec_lpc;
struct lpc_driver_data *driver_data;
const struct lpc_driver_data *driver_data;
u8 buf[2] = {};
int irq, ret;
u32 quirks;
@@ -472,6 +473,9 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
ec_lpc->mmio_memory_base = EC_LPC_ADDR_MEMMAP;
driver_data = platform_get_drvdata(pdev);
if (!driver_data)
driver_data = acpi_device_get_match_data(dev);
if (driver_data) {
quirks = driver_data->quirks;
@@ -625,12 +629,6 @@ static void cros_ec_lpc_remove(struct platform_device *pdev)
cros_ec_unregister(ec_dev);
}
static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
{ ACPI_DRV_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
static const struct lpc_driver_data framework_laptop_npcx_lpc_driver_data __initconst = {
.quirks = CROS_EC_LPC_QUIRK_REMAP_MEMORY,
.quirk_mmio_memory_base = 0xE00,
@@ -642,6 +640,13 @@ static const struct lpc_driver_data framework_laptop_mec_lpc_driver_data __initc
.quirk_aml_mutex_name = "ECMT",
};
static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
{ ACPI_DRV_NAME, 0 },
{ FRMW_ACPI_DRV_NAME, (kernel_ulong_t)&framework_laptop_npcx_lpc_driver_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
{
/*
@@ -795,7 +800,8 @@ static int __init cros_ec_lpc_init(void)
int ret;
const struct dmi_system_id *dmi_match;
cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME);
cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME) ||
!!cros_ec_lpc_get_device(FRMW_ACPI_DRV_NAME);
dmi_match = dmi_first_match(cros_ec_lpc_dmi_table);

View File

@@ -8,6 +8,7 @@ config X86_ANDROID_TABLETS
depends on I2C && SPI && SERIAL_DEV_BUS && ACPI && EFI && GPIOLIB && PMIC_OPREGION
select NEW_LEDS
select LEDS_CLASS
select POWER_SUPPLY
help
X86 tablets which ship with Android as (part of) the factory image
typically have various problems with their DSDTs. The factory kernels

View File

@@ -81,13 +81,14 @@ extern atomic64_t event_counter;
/* Admin queue management definitions */
#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
#define MPI3MR_ADMIN_REPLY_Q_SIZE (8 * MPI3MR_PAGE_SIZE_4K)
#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
#define MPI3MR_OP_REP_Q_QD 1024
#define MPI3MR_OP_REP_Q_QD2K 2048
#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
@@ -329,6 +330,7 @@ enum mpi3mr_reset_reason {
#define MPI3MR_RESET_REASON_OSTYPE_SHIFT 28
#define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20
/* Queue type definitions */
enum queue_type {
MPI3MR_DEFAULT_QUEUE = 0,
@@ -388,6 +390,7 @@ struct mpi3mr_ioc_facts {
u16 max_msix_vectors;
u8 personality;
u8 dma_mask;
bool max_req_limit;
u8 protocol_flags;
u8 sge_mod_mask;
u8 sge_mod_value;
@@ -457,6 +460,8 @@ struct op_req_qinfo {
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
* @qtype: Type of queue (types defined in enum queue_type)
* @qfull_watermark: Watermark defined in reply queue to avoid
* reply queue full
*/
struct op_reply_qinfo {
u16 ci;
@@ -472,6 +477,7 @@ struct op_reply_qinfo {
bool enable_irq_poll;
atomic_t in_use;
enum queue_type qtype;
u16 qfull_watermark;
};
/**
@@ -1091,6 +1097,7 @@ struct scmd_priv {
* @ts_update_interval: Timestamp update interval
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
* @io_admin_reset_sync: Manage state of I/O ops during an admin reset process
* @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
@@ -1154,6 +1161,8 @@ struct scmd_priv {
* @snapdump_trigger_active: Snapdump trigger active flag
* @pci_err_recovery: PCI error recovery in progress
* @block_on_pci_err: Block IO during PCI error recovery
* @reply_qfull_count: Occurences of reply queue full avoidance kicking-in
* @prevent_reply_qfull: Enable reply queue prevention
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -1277,6 +1286,7 @@ struct mpi3mr_ioc {
u16 ts_update_interval;
u8 reset_in_progress;
u8 unrecoverable;
u8 io_admin_reset_sync;
int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
@@ -1352,6 +1362,8 @@ struct mpi3mr_ioc {
bool fw_release_trigger_active;
bool pci_err_recovery;
bool block_on_pci_err;
atomic_t reply_qfull_count;
bool prevent_reply_qfull;
};
/**

View File

@@ -3060,6 +3060,29 @@ reply_queue_count_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(reply_queue_count);
/**
* reply_qfull_count_show - Show reply qfull count
* @dev: class device
* @attr: Device attributes
* @buf: Buffer to copy
*
* Retrieves the current value of the reply_qfull_count from the mrioc structure and
* formats it as a string for display.
*
* Return: sysfs_emit() return
*/
static ssize_t
reply_qfull_count_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct mpi3mr_ioc *mrioc = shost_priv(shost);
return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count));
}
static DEVICE_ATTR_RO(reply_qfull_count);
/**
* logging_level_show - Show controller debug level
* @dev: class device
@@ -3152,6 +3175,7 @@ static struct attribute *mpi3mr_host_attrs[] = {
&dev_attr_fw_queue_depth.attr,
&dev_attr_op_req_q_count.attr,
&dev_attr_reply_queue_count.attr,
&dev_attr_reply_qfull_count.attr,
&dev_attr_logging_level.attr,
&dev_attr_adp_state.attr,
NULL,

View File

@@ -17,7 +17,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd);
static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
@@ -459,7 +459,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
}
do {
if (mrioc->unrecoverable)
if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
@@ -554,7 +554,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
}
do {
if (mrioc->unrecoverable)
if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
@@ -2104,15 +2104,22 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
}
reply_qid = qidx + 1;
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
!mrioc->pdev->revision)
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
if (mrioc->pdev->revision)
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
else
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
} else
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
atomic_set(&op_reply_q->in_use, 0);
op_reply_q->enable_irq_poll = false;
op_reply_q->qfull_watermark =
op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
if (!op_reply_q->q_segments) {
retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
@@ -2416,8 +2423,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
void *segment_base_addr;
u16 req_sz = mrioc->facts.op_req_sz;
struct segments *segments = op_req_q->q_segments;
struct op_reply_qinfo *op_reply_q = NULL;
reply_qidx = op_req_q->reply_qid - 1;
op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
if (mrioc->unrecoverable)
return -EFAULT;
@@ -2448,6 +2457,15 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
goto out;
}
/* Reply queue is nearing to get full, push back IOs to SML */
if ((mrioc->prevent_reply_qfull == true) &&
(atomic_read(&op_reply_q->pend_ios) >
(op_reply_q->qfull_watermark))) {
atomic_inc(&mrioc->reply_qfull_count);
retval = -EAGAIN;
goto out;
}
segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
req_entry = (u8 *)segment_base_addr +
((pi % op_req_q->segment_qd) * req_sz);
@@ -3091,6 +3109,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.dma_mask = (facts_flags &
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
mrioc->facts.dma_mask = (facts_flags &
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
mrioc->facts.protocol_flags = facts_data->protocol_flags;
mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
@@ -4214,6 +4235,9 @@ retry_init:
mrioc->shost->transportt = mpi3mr_transport_template;
}
if (mrioc->facts.max_req_limit)
mrioc->prevent_reply_qfull = true;
mrioc->reply_sz = mrioc->facts.reply_sz;
retval = mpi3mr_check_reset_dma_mask(mrioc);
@@ -4370,6 +4394,7 @@ retry_init:
goto out_failed_noretry;
}
mrioc->io_admin_reset_sync = 0;
if (is_resume || mrioc->block_on_pci_err) {
dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
@@ -5228,6 +5253,55 @@ cleanup_drv_cmd:
drv_cmd->retry_count = 0;
}
/**
* mpi3mr_check_op_admin_proc -
* @mrioc: Adapter instance reference
*
* Check if any of the operation reply queues
* or the admin reply queue are currently in use.
* If any queue is in use, this function waits for
* a maximum of 10 seconds for them to become available.
*
* Return: 0 on success, non-zero on failure.
*/
static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
{
u16 timeout = 10 * 10;
u16 elapsed_time = 0;
bool op_admin_in_use = false;
do {
op_admin_in_use = false;
/* Check admin_reply queue first to exit early */
if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
op_admin_in_use = true;
else {
/* Check op_reply queues */
int i;
for (i = 0; i < mrioc->num_queues; i++) {
if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
op_admin_in_use = true;
break;
}
}
}
if (!op_admin_in_use)
break;
msleep(100);
} while (++elapsed_time < timeout);
if (op_admin_in_use)
return 1;
return 0;
}
/**
* mpi3mr_soft_reset_handler - Reset the controller
* @mrioc: Adapter instance reference
@@ -5308,6 +5382,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
mpi3mr_ioc_disable_intr(mrioc);
mrioc->io_admin_reset_sync = 1;
if (snapdump) {
mpi3mr_set_diagsave(mrioc);
@@ -5335,6 +5410,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
retval = mpi3mr_check_op_admin_proc(mrioc);
if (retval) {
ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
"thread still processing replies even after a 10 second\n"
"timeout. Marking the controller as unrecoverable!\n");
goto out;
}
if (mrioc->num_io_throttle_group !=
mrioc->facts.max_io_throttle_group) {
ioc_err(mrioc,

View File

@@ -2243,7 +2243,7 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
response_length += spc_rsoc_encode_command_descriptor(
&buf[response_length], rctd, descr);
}
put_unaligned_be32(response_length - 3, buf);
put_unaligned_be32(response_length - 4, buf);
} else {
response_length = spc_rsoc_encode_one_command_descriptor(
&buf[response_length], rctd, descr,

View File

@@ -367,17 +367,25 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho
old_fds = old_fdt->fd;
new_fds = new_fdt->fd;
/*
* We may be racing against fd allocation from other threads using this
* files_struct, despite holding ->file_lock.
*
* alloc_fd() might have already claimed a slot, while fd_install()
* did not populate it yet. Note the latter operates locklessly, so
* the file can show up as we are walking the array below.
*
* At the same time we know no files will disappear as all other
* operations take the lock.
*
* Instead of trying to placate userspace racing with itself, we
* ref the file if we see it and mark the fd slot as unused otherwise.
*/
for (i = open_files; i != 0; i--) {
struct file *f = *old_fds++;
struct file *f = rcu_dereference_raw(*old_fds++);
if (f) {
get_file(f);
} else {
/*
* The fd may be claimed in the fd bitmap but not yet
* instantiated in the files array if a sibling thread
* is partway through open(). So make sure that this
* fd is available to the new process.
*/
__clear_open_fd(open_files - i, new_fdt);
}
rcu_assign_pointer(*new_fds++, f);
@@ -637,7 +645,7 @@ struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
return NULL;
fd = array_index_nospec(fd, fdt->max_fds);
file = fdt->fd[fd];
file = rcu_dereference_raw(fdt->fd[fd]);
if (file) {
rcu_assign_pointer(fdt->fd[fd], NULL);
__put_unused_fd(files, fd);
@@ -1227,7 +1235,7 @@ __releases(&files->file_lock)
*/
fdt = files_fdtable(files);
fd = array_index_nospec(fd, fdt->max_fds);
tofree = fdt->fd[fd];
tofree = rcu_dereference_raw(fdt->fd[fd]);
if (!tofree && fd_is_open(fd, fdt))
goto Ebusy;
get_file(file);

View File

@@ -1987,6 +1987,7 @@ static void warn_mandlock(void)
static int can_umount(const struct path *path, int flags)
{
struct mount *mnt = real_mount(path->mnt);
struct super_block *sb = path->dentry->d_sb;
if (!may_mount())
return -EPERM;
@@ -1996,7 +1997,7 @@ static int can_umount(const struct path *path, int flags)
return -EINVAL;
if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
return -EINVAL;
if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
return -EPERM;
return 0;
}

View File

@@ -1223,10 +1223,19 @@ void hid_quirks_exit(__u16 bus);
#ifdef CONFIG_HID_PID
int hid_pidff_init(struct hid_device *hid);
int hid_pidff_init_with_quirks(struct hid_device *hid, __u32 initial_quirks);
#else
#define hid_pidff_init NULL
#define hid_pidff_init_with_quirks NULL
#endif
/* HID PIDFF quirks */
#define HID_PIDFF_QUIRK_MISSING_DELAY BIT(0)
#define HID_PIDFF_QUIRK_MISSING_PBO BIT(1)
#define HID_PIDFF_QUIRK_PERMISSIVE_CONTROL BIT(2)
#define HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION BIT(3)
#define HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY BIT(4)
#define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__)
#define hid_err(hid, fmt, ...) \

View File

@@ -170,6 +170,12 @@ struct hw_perf_event {
};
struct { /* aux / Intel-PT */
u64 aux_config;
/*
* For AUX area events, aux_paused cannot be a state
* flag because it can be updated asynchronously to
* state.
*/
unsigned int aux_paused;
};
struct { /* software */
struct hrtimer hrtimer;
@@ -294,6 +300,7 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
#define PERF_PMU_CAP_AUX_PAUSE 0x0200
/**
* pmu::scope
@@ -384,6 +391,8 @@ struct pmu {
#define PERF_EF_START 0x01 /* start the counter when adding */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */
#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */
/*
* Adds/Removes a counter to/from the PMU, can be done inside a
@@ -423,6 +432,18 @@ struct pmu {
*
* ->start() with PERF_EF_RELOAD will reprogram the counter
* value, must be preceded by a ->stop() with PERF_EF_UPDATE.
*
* ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not
* overlap another ->stop() with PERF_EF_PAUSE nor ->start() with
* PERF_EF_RESUME.
*
* ->start() with PERF_EF_RESUME will start as simply as possible but
* only if the counter is not otherwise stopped. Will not overlap
* another ->start() with PERF_EF_RESUME nor ->stop() with
* PERF_EF_PAUSE.
*
* Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other
* ->stop()/->start() invocations, just not itself.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);
@@ -652,13 +673,15 @@ struct swevent_hlist {
struct rcu_head rcu_head;
};
#define PERF_ATTACH_CONTEXT 0x01
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
#define PERF_ATTACH_TASK_DATA 0x08
#define PERF_ATTACH_ITRACE 0x10
#define PERF_ATTACH_SCHED_CB 0x20
#define PERF_ATTACH_CHILD 0x40
#define PERF_ATTACH_CONTEXT 0x0001
#define PERF_ATTACH_GROUP 0x0002
#define PERF_ATTACH_TASK 0x0004
#define PERF_ATTACH_TASK_DATA 0x0008
#define PERF_ATTACH_ITRACE 0x0010
#define PERF_ATTACH_SCHED_CB 0x0020
#define PERF_ATTACH_CHILD 0x0040
#define PERF_ATTACH_EXCLUSIVE 0x0080
#define PERF_ATTACH_CALLCHAIN 0x0100
struct bpf_prog;
struct perf_cgroup;
@@ -810,7 +833,6 @@ struct perf_event {
struct irq_work pending_disable_irq;
struct callback_head pending_task;
unsigned int pending_work;
struct rcuwait pending_work_wait;
atomic_t event_limit;
@@ -1703,6 +1725,13 @@ static inline bool has_aux(struct perf_event *event)
return event->pmu->setup_aux;
}
static inline bool has_aux_action(struct perf_event *event)
{
return event->attr.aux_sample_size ||
event->attr.aux_pause ||
event->attr.aux_resume;
}
static inline bool is_write_backward(struct perf_event *event)
{
return !!event->attr.write_backward;

View File

@@ -204,6 +204,7 @@ void printk_legacy_allow_panic_sync(void);
extern bool nbcon_device_try_acquire(struct console *con);
extern void nbcon_device_release(struct console *con);
void nbcon_atomic_flush_unsafe(void);
bool pr_flush(int timeout_ms, bool reset_on_progress);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
@@ -304,6 +305,11 @@ static inline void nbcon_atomic_flush_unsafe(void)
{
}
static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
{
return true;
}
#endif
bool this_cpu_in_panic(void);

View File

@@ -2826,6 +2826,11 @@ struct ieee80211_txq {
* implements MLO, so operation can continue on other links when one
* link is switching.
*
* @IEEE80211_HW_STRICT: strictly enforce certain things mandated by the spec
* but otherwise ignored/worked around for interoperability. This is a
* HW flag so drivers can opt in according to their own control, e.g. in
* testing.
*
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2885,6 +2890,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_DISALLOW_PUNCTURING,
IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ,
IEEE80211_HW_HANDLES_QUIET_CSA,
IEEE80211_HW_STRICT,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS

View File

@@ -511,7 +511,16 @@ struct perf_event_attr {
__u16 sample_max_stack;
__u16 __reserved_2;
__u32 aux_sample_size;
__u32 __reserved_3;
union {
__u32 aux_action;
struct {
__u32 aux_start_paused : 1, /* start AUX area tracing paused */
aux_pause : 1, /* on overflow, pause AUX area tracing */
aux_resume : 1, /* on overflow, resume AUX area tracing */
__reserved_3 : 29;
};
};
/*
* User provided data if sigtrap=1, passed back to user via

View File

@@ -372,7 +372,7 @@ struct xen_mce {
#define XEN_MCE_LOG_LEN 32
struct xen_mce_log {
char signature[12]; /* "MACHINECHECK" */
char signature[12] __nonstring; /* "MACHINECHECK" */
unsigned len; /* = XEN_MCE_LOG_LEN */
unsigned next;
unsigned flags;

View File

@@ -2146,7 +2146,7 @@ static void perf_put_aux_event(struct perf_event *event)
static bool perf_need_aux_event(struct perf_event *event)
{
return !!event->attr.aux_output || !!event->attr.aux_sample_size;
return event->attr.aux_output || has_aux_action(event);
}
static int perf_get_aux_event(struct perf_event *event,
@@ -2171,6 +2171,10 @@ static int perf_get_aux_event(struct perf_event *event,
!perf_aux_output_match(event, group_leader))
return 0;
if ((event->attr.aux_pause || event->attr.aux_resume) &&
!(group_leader->pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE))
return 0;
if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
return 0;
@@ -5259,6 +5263,8 @@ static int exclusive_event_init(struct perf_event *event)
return -EBUSY;
}
event->attach_state |= PERF_ATTACH_EXCLUSIVE;
return 0;
}
@@ -5266,14 +5272,13 @@ static void exclusive_event_destroy(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
if (!is_exclusive_pmu(pmu))
return;
/* see comment in exclusive_event_init() */
if (event->attach_state & PERF_ATTACH_TASK)
atomic_dec(&pmu->exclusive_cnt);
else
atomic_inc(&pmu->exclusive_cnt);
event->attach_state &= ~PERF_ATTACH_EXCLUSIVE;
}
static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
@@ -5308,35 +5313,58 @@ static bool exclusive_event_installable(struct perf_event *event,
static void perf_addr_filters_splice(struct perf_event *event,
struct list_head *head);
static void perf_pending_task_sync(struct perf_event *event)
/* vs perf_event_alloc() error */
static void __free_event(struct perf_event *event)
{
struct callback_head *head = &event->pending_task;
if (event->attach_state & PERF_ATTACH_CALLCHAIN)
put_callchain_buffers();
kfree(event->addr_filter_ranges);
if (event->attach_state & PERF_ATTACH_EXCLUSIVE)
exclusive_event_destroy(event);
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (event->destroy)
event->destroy(event);
if (!event->pending_work)
return;
/*
* If the task is queued to the current task's queue, we
* obviously can't wait for it to complete. Simply cancel it.
* Must be after ->destroy(), due to uprobe_perf_close() using
* hw.target.
*/
if (task_work_cancel(current, head)) {
event->pending_work = 0;
local_dec(&event->ctx->nr_no_switch_fast);
return;
if (event->hw.target)
put_task_struct(event->hw.target);
if (event->pmu_ctx) {
/*
* put_pmu_ctx() needs an event->ctx reference, because of
* epc->ctx.
*/
WARN_ON_ONCE(!event->ctx);
WARN_ON_ONCE(event->pmu_ctx->ctx != event->ctx);
put_pmu_ctx(event->pmu_ctx);
}
/*
* All accesses related to the event are within the same RCU section in
* perf_pending_task(). The RCU grace period before the event is freed
* will make sure all those accesses are complete by then.
* perf_event_free_task() relies on put_ctx() being 'last', in
* particular all task references must be cleaned up.
*/
rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE);
if (event->ctx)
put_ctx(event->ctx);
if (event->pmu)
module_put(event->pmu->module);
call_rcu(&event->rcu_head, free_event_rcu);
}
/* vs perf_event_alloc() success */
static void _free_event(struct perf_event *event)
{
irq_work_sync(&event->pending_irq);
irq_work_sync(&event->pending_disable_irq);
perf_pending_task_sync(event);
unaccount_event(event);
@@ -5354,42 +5382,10 @@ static void _free_event(struct perf_event *event)
mutex_unlock(&event->mmap_mutex);
}
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (!event->parent) {
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
}
perf_event_free_bpf_prog(event);
perf_addr_filters_splice(event, NULL);
kfree(event->addr_filter_ranges);
if (event->destroy)
event->destroy(event);
/*
* Must be after ->destroy(), due to uprobe_perf_close() using
* hw.target.
*/
if (event->hw.target)
put_task_struct(event->hw.target);
if (event->pmu_ctx)
put_pmu_ctx(event->pmu_ctx);
/*
* perf_event_free_task() relies on put_ctx() being 'last', in particular
* all task references must be cleaned up.
*/
if (event->ctx)
put_ctx(event->ctx);
exclusive_event_destroy(event);
module_put(event->pmu->module);
call_rcu(&event->rcu_head, free_event_rcu);
__free_event(event);
}
/*
@@ -5461,10 +5457,17 @@ static void perf_remove_from_owner(struct perf_event *event)
static void put_event(struct perf_event *event)
{
struct perf_event *parent;
if (!atomic_long_dec_and_test(&event->refcount))
return;
parent = event->parent;
_free_event(event);
/* Matches the refcount bump in inherit_event() */
if (parent)
put_event(parent);
}
/*
@@ -5548,11 +5551,6 @@ again:
if (tmp == child) {
perf_remove_from_context(child, DETACH_GROUP);
list_move(&child->child_list, &free_list);
/*
* This matches the refcount bump in inherit_event();
* this can't be the last reference.
*/
put_event(event);
} else {
var = &ctx->refcount;
}
@@ -5578,7 +5576,8 @@ again:
void *var = &child->ctx->refcount;
list_del(&child->child_list);
free_event(child);
/* Last reference unless ->pending_task work is pending */
put_event(child);
/*
* Wake any perf_event_free_task() waiting for this event to be
@@ -5589,7 +5588,11 @@ again:
}
no_ctx:
put_event(event); /* Must be the 'last' reference */
/*
* Last reference unless ->pending_task work is pending on this event
* or any of its children.
*/
put_event(event);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
@@ -6974,12 +6977,6 @@ static void perf_pending_task(struct callback_head *head)
struct perf_event *event = container_of(head, struct perf_event, pending_task);
int rctx;
/*
* All accesses to the event must belong to the same implicit RCU read-side
* critical section as the ->pending_work reset. See comment in
* perf_pending_task_sync().
*/
rcu_read_lock();
/*
* If we 'fail' here, that's OK, it means recursion is already disabled
* and we won't recurse 'further'.
@@ -6990,9 +6987,8 @@ static void perf_pending_task(struct callback_head *head)
event->pending_work = 0;
perf_sigtrap(event);
local_dec(&event->ctx->nr_no_switch_fast);
rcuwait_wake_up(&event->pending_work_wait);
}
rcu_read_unlock();
put_event(event);
if (rctx >= 0)
perf_swevent_put_recursion_context(rctx);
@@ -8015,6 +8011,49 @@ void perf_prepare_header(struct perf_event_header *header,
WARN_ON_ONCE(header->size & 7);
}
static void __perf_event_aux_pause(struct perf_event *event, bool pause)
{
if (pause) {
if (!event->hw.aux_paused) {
event->hw.aux_paused = 1;
event->pmu->stop(event, PERF_EF_PAUSE);
}
} else {
if (event->hw.aux_paused) {
event->hw.aux_paused = 0;
event->pmu->start(event, PERF_EF_RESUME);
}
}
}
static void perf_event_aux_pause(struct perf_event *event, bool pause)
{
struct perf_buffer *rb;
if (WARN_ON_ONCE(!event))
return;
rb = ring_buffer_get(event);
if (!rb)
return;
scoped_guard (irqsave) {
/*
* Guard against self-recursion here. Another event could trip
* this same from NMI context.
*/
if (READ_ONCE(rb->aux_in_pause_resume))
break;
WRITE_ONCE(rb->aux_in_pause_resume, 1);
barrier();
__perf_event_aux_pause(event, pause);
barrier();
WRITE_ONCE(rb->aux_in_pause_resume, 0);
}
ring_buffer_put(rb);
}
static __always_inline int
__perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
@@ -9818,9 +9857,12 @@ static int __perf_event_overflow(struct perf_event *event,
ret = __perf_event_account_interrupt(event, throttle);
if (event->attr.aux_pause)
perf_event_aux_pause(event->aux_event, true);
if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT &&
!bpf_overflow_handler(event, data, regs))
return ret;
goto out;
/*
* XXX event_limit might not quite work as expected on inherited
@@ -9854,6 +9896,7 @@ static int __perf_event_overflow(struct perf_event *event,
!task_work_add(current, &event->pending_task, notify_mode)) {
event->pending_work = pending_id;
local_inc(&event->ctx->nr_no_switch_fast);
WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
event->pending_addr = 0;
if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
@@ -9882,6 +9925,9 @@ static int __perf_event_overflow(struct perf_event *event,
event->pending_wakeup = 1;
irq_work_queue(&event->pending_irq);
}
out:
if (event->attr.aux_resume)
perf_event_aux_pause(event->aux_event, false);
return ret;
}
@@ -11947,8 +11993,10 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
event->destroy(event);
}
if (ret)
if (ret) {
event->pmu = NULL;
module_put(pmu->module);
}
return ret;
}
@@ -12197,7 +12245,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
init_irq_work(&event->pending_irq, perf_pending_irq);
event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable);
init_task_work(&event->pending_task, perf_pending_task);
rcuwait_init(&event->pending_work_wait);
mutex_init(&event->mmap_mutex);
raw_spin_lock_init(&event->addr_filters.lock);
@@ -12276,7 +12323,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
* See perf_output_read().
*/
if (has_inherit_and_sample_read(attr) && !(attr->sample_type & PERF_SAMPLE_TID))
goto err_ns;
goto err;
if (!has_branch_stack(event))
event->attr.branch_sample_type = 0;
@@ -12284,7 +12331,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
pmu = perf_init_event(event);
if (IS_ERR(pmu)) {
err = PTR_ERR(pmu);
goto err_ns;
goto err;
}
/*
@@ -12294,24 +12341,38 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
*/
if (pmu->task_ctx_nr == perf_invalid_context && (task || cgroup_fd != -1)) {
err = -EINVAL;
goto err_pmu;
goto err;
}
if (event->attr.aux_output &&
!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
(!(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT) ||
event->attr.aux_pause || event->attr.aux_resume)) {
err = -EOPNOTSUPP;
goto err_pmu;
goto err;
}
if (event->attr.aux_pause && event->attr.aux_resume) {
err = -EINVAL;
goto err;
}
if (event->attr.aux_start_paused) {
if (!(pmu->capabilities & PERF_PMU_CAP_AUX_PAUSE)) {
err = -EOPNOTSUPP;
goto err;
}
event->hw.aux_paused = 1;
}
if (cgroup_fd != -1) {
err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
if (err)
goto err_pmu;
goto err;
}
err = exclusive_event_init(event);
if (err)
goto err_pmu;
goto err;
if (has_addr_filter(event)) {
event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
@@ -12319,7 +12380,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
GFP_KERNEL);
if (!event->addr_filter_ranges) {
err = -ENOMEM;
goto err_per_task;
goto err;
}
/*
@@ -12344,41 +12405,22 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
err = get_callchain_buffers(attr->sample_max_stack);
if (err)
goto err_addr_filters;
goto err;
event->attach_state |= PERF_ATTACH_CALLCHAIN;
}
}
err = security_perf_event_alloc(event);
if (err)
goto err_callchain_buffer;
goto err;
/* symmetric to unaccount_event() in _free_event() */
account_event(event);
return event;
err_callchain_buffer:
if (!event->parent) {
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
}
err_addr_filters:
kfree(event->addr_filter_ranges);
err_per_task:
exclusive_event_destroy(event);
err_pmu:
if (is_cgroup_event(event))
perf_detach_cgroup(event);
if (event->destroy)
event->destroy(event);
module_put(pmu->module);
err_ns:
if (event->hw.target)
put_task_struct(event->hw.target);
call_rcu(&event->rcu_head, free_event_rcu);
err:
__free_event(event);
return ERR_PTR(err);
}
@@ -13098,7 +13140,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
* Grouping is not supported for kernel events, neither is 'AUX',
* make sure the caller's intentions are adjusted.
*/
if (attr->aux_output)
if (attr->aux_output || attr->aux_action)
return ERR_PTR(-EINVAL);
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
@@ -13345,8 +13387,7 @@ perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
* Kick perf_poll() for is_event_hup();
*/
perf_event_wakeup(parent_event);
free_event(event);
put_event(parent_event);
put_event(event);
return;
}
@@ -13464,13 +13505,11 @@ static void perf_free_event(struct perf_event *event,
list_del_init(&event->child_list);
mutex_unlock(&parent->child_mutex);
put_event(parent);
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
free_event(event);
put_event(event);
}
/*

View File

@@ -52,6 +52,7 @@ struct perf_buffer {
void (*free_aux)(void *);
refcount_t aux_refcount;
int aux_in_sampling;
int aux_in_pause_resume;
void **aux_pages;
void *aux_priv;

View File

@@ -1443,10 +1443,10 @@ static const char * const comp_alg_enabled[] = {
static int hibernate_compressor_param_set(const char *compressor,
const struct kernel_param *kp)
{
unsigned int sleep_flags;
int index, ret;
sleep_flags = lock_system_sleep();
if (!mutex_trylock(&system_transition_mutex))
return -EBUSY;
index = sysfs_match_string(comp_alg_enabled, compressor);
if (index >= 0) {
@@ -1458,7 +1458,7 @@ static int hibernate_compressor_param_set(const char *compressor,
ret = index;
}
unlock_system_sleep(sleep_flags);
mutex_unlock(&system_transition_mutex);
if (ret)
pr_debug("Cannot set specified compressor %s\n",

View File

@@ -2472,7 +2472,6 @@ asmlinkage __visible int _printk(const char *fmt, ...)
}
EXPORT_SYMBOL(_printk);
static bool pr_flush(int timeout_ms, bool reset_on_progress);
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
#else /* CONFIG_PRINTK */
@@ -2485,7 +2484,6 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
static u64 syslog_seq;
static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
#endif /* CONFIG_PRINTK */
@@ -4478,7 +4476,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
* Context: Process context. May sleep while acquiring console lock.
* Return: true if all usable printers are caught up.
*/
static bool pr_flush(int timeout_ms, bool reset_on_progress)
bool pr_flush(int timeout_ms, bool reset_on_progress)
{
return __pr_flush(NULL, timeout_ms, reset_on_progress);
}

View File

@@ -700,6 +700,7 @@ void kernel_power_off(void)
migrate_to_reboot_cpu();
syscore_shutdown();
pr_emerg("Power down\n");
pr_flush(1000, true);
kmsg_dump(KMSG_DUMP_SHUTDOWN);
machine_power_off();
}

View File

@@ -55,7 +55,7 @@
#ifndef DYNAMIC_BMI2
#if ((defined(__clang__) && __has_attribute(__target__)) \
|| (defined(__GNUC__) \
&& (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
&& (__GNUC__ >= 11))) \
&& (defined(__x86_64__) || defined(_M_X64)) \
&& !defined(__BMI2__)
# define DYNAMIC_BMI2 1

View File

@@ -1066,7 +1066,13 @@ static void page_pool_release_retry(struct work_struct *wq)
int inflight;
inflight = page_pool_release(pool);
if (!inflight)
/* In rare cases, a driver bug may cause inflight to go negative.
* Don't reschedule release if inflight is 0 or negative.
* - If 0, the page_pool has been destroyed
* - if negative, we will never recover
* in both cases no reschedule is necessary.
*/
if (inflight <= 0)
return;
/* Periodic warning for page pools the user can't see */

View File

@@ -353,7 +353,7 @@ void page_pool_unlist(struct page_pool *pool)
int page_pool_check_memory_provider(struct net_device *dev,
struct netdev_rx_queue *rxq)
{
struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv;
void *binding = rxq->mp_params.mp_priv;
struct page_pool *pool;
struct hlist_node *n;

View File

@@ -499,6 +499,7 @@ static const char *hw_flag_names[] = {
FLAG(DISALLOW_PUNCTURING),
FLAG(DISALLOW_PUNCTURING_5GHZ),
FLAG(HANDLES_QUIET_CSA),
FLAG(STRICT),
#undef FLAG
};
@@ -531,6 +532,46 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
return rv;
}
static ssize_t hwflags_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
char buf[100];
int val;
if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
if (count && buf[count - 1] == '\n')
buf[count - 1] = '\0';
else
buf[count] = '\0';
if (sscanf(buf, "strict=%d", &val) == 1) {
switch (val) {
case 0:
ieee80211_hw_set(&local->hw, STRICT);
return count;
case 1:
__clear_bit(IEEE80211_HW_STRICT, local->hw.flags);
return count;
default:
return -EINVAL;
}
}
return -EINVAL;
}
static const struct file_operations hwflags_ops = {
.open = simple_open,
.read = hwflags_read,
.write = hwflags_write,
};
static ssize_t misc_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -581,7 +622,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, res);
}
DEBUGFS_READONLY_FILE_OPS(hwflags);
DEBUGFS_READONLY_FILE_OPS(queues);
DEBUGFS_READONLY_FILE_OPS(misc);
@@ -659,7 +699,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
#ifdef CONFIG_PM
DEBUGFS_ADD_MODE(reset, 0200);
#endif
DEBUGFS_ADD(hwflags);
DEBUGFS_ADD_MODE(hwflags, 0600);
DEBUGFS_ADD(user_power);
DEBUGFS_ADD(power);
DEBUGFS_ADD(hw_conf);

View File

@@ -8,7 +8,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
* Copyright (C) 2018-2024 Intel Corporation
* Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -812,6 +812,9 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
*/
static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
{
if (WARN_ON(!list_empty(&sdata->work.entry)))
wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work);
/* free extra data */
ieee80211_free_keys(sdata, false);

View File

@@ -166,6 +166,9 @@ ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata,
bool no_vht = false;
u32 ht_cfreq;
if (ieee80211_hw_check(&sdata->local->hw, STRICT))
ignore_ht_channel_mismatch = false;
*chandef = (struct cfg80211_chan_def) {
.chan = channel,
.width = NL80211_CHAN_WIDTH_20_NOHT,
@@ -385,7 +388,7 @@ ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
* zeroes, which is nonsense, and completely inconsistent with itself
* (it doesn't have 8 streams). Accept the settings in this case anyway.
*/
if (!ap_min_req_set)
if (!ieee80211_hw_check(&sdata->local->hw, STRICT) && !ap_min_req_set)
return true;
/* make sure the AP is consistent with itself
@@ -445,7 +448,7 @@ ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
* zeroes, which is nonsense, and completely inconsistent with itself
* (it doesn't have 8 streams). Accept the settings in this case anyway.
*/
if (!ap_min_req_set)
if (!ieee80211_hw_check(&sdata->local->hw, STRICT) && !ap_min_req_set)
return true;
/* Need to go over for 80MHz, 160MHz and for 80+80 */
@@ -1212,13 +1215,15 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
* Some APs apparently get confused if our capabilities are better
* than theirs, so restrict what we advertise in the assoc request.
*/
if (!(ap_vht_cap->vht_cap_info &
cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
else if (!(ap_vht_cap->vht_cap_info &
cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
if (!ieee80211_hw_check(&local->hw, STRICT)) {
if (!(ap_vht_cap->vht_cap_info &
cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
else if (!(ap_vht_cap->vht_cap_info &
cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
}
/*
* If some other vif is using the MU-MIMO capability we cannot associate
@@ -1260,14 +1265,16 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
return mu_mimo_owner;
}
static void ieee80211_assoc_add_rates(struct sk_buff *skb,
static void ieee80211_assoc_add_rates(struct ieee80211_local *local,
struct sk_buff *skb,
enum nl80211_chan_width width,
struct ieee80211_supported_band *sband,
struct ieee80211_mgd_assoc_data *assoc_data)
{
u32 rates;
if (assoc_data->supp_rates_len) {
if (assoc_data->supp_rates_len &&
!ieee80211_hw_check(&local->hw, STRICT)) {
/*
* Get all rates supported by the device and the AP as
* some APs don't like getting a superset of their rates
@@ -1481,7 +1488,7 @@ static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
*capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
if (sband->band != NL80211_BAND_S1GHZ)
ieee80211_assoc_add_rates(skb, width, sband, assoc_data);
ieee80211_assoc_add_rates(local, skb, width, sband, assoc_data);
if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT ||
*capab & WLAN_CAPABILITY_RADIO_MEASURE) {
@@ -1925,7 +1932,8 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
* for some reason check it and want it to be set, set the bit for all
* pre-EHT connections as we used to do.
*/
if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT)
if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT &&
!ieee80211_hw_check(&local->hw, STRICT))
capab |= WLAN_CAPABILITY_ESS;
/* add the elements for the assoc (main) link */
@@ -4710,7 +4718,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
* 2G/3G/4G wifi routers, reported models include the "Onda PN51T",
* "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
*/
if (!is_6ghz &&
if (!ieee80211_hw_check(&local->hw, STRICT) && !is_6ghz &&
((assoc_data->wmm && !elems->wmm_param) ||
(link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT &&
(!elems->ht_cap_elem || !elems->ht_operation)) ||
@@ -4846,6 +4854,15 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
bss_vht_cap = (const void *)elem->data;
}
if (ieee80211_hw_check(&local->hw, STRICT) &&
(!bss_vht_cap || memcmp(bss_vht_cap, elems->vht_cap_elem,
sizeof(*bss_vht_cap)))) {
rcu_read_unlock();
ret = false;
link_info(link, "VHT capabilities mismatch\n");
goto out;
}
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
elems->vht_cap_elem,
bss_vht_cap, link_sta);

View File

@@ -994,8 +994,9 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill,
NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 8, pkt[8], bsize);
NFT_PIPAPO_AVX2_AND(6, 2, 3);
NFT_PIPAPO_AVX2_AND(3, 4, 7);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 9, pkt[9], bsize);
NFT_PIPAPO_AVX2_AND(0, 4, 5);
NFT_PIPAPO_AVX2_AND(0, 3, 5);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 10, pkt[10], bsize);
NFT_PIPAPO_AVX2_AND(2, 6, 7);
NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 11, pkt[11], bsize);

View File

@@ -37,6 +37,7 @@
#include <linux/completion.h>
#include <linux/acpi.h>
#include <linux/pgtable.h>
#include <linux/dmi.h>
#ifdef CONFIG_X86
/* for snoop control */
@@ -1360,8 +1361,21 @@ static void azx_free(struct azx *chip)
if (use_vga_switcheroo(hda)) {
if (chip->disabled && hda->probe_continued)
snd_hda_unlock_devices(&chip->bus);
if (hda->vga_switcheroo_registered)
if (hda->vga_switcheroo_registered) {
vga_switcheroo_unregister_client(chip->pci);
/* Some GPUs don't have sound, and azx_first_init fails,
* leaving the device probed but non-functional. As long
* as it's probed, the PCI subsystem keeps its runtime
* PM status as active. Force it to suspended (as we
* actually stop the chip) to allow GPU to suspend via
* vga_switcheroo, and print a warning.
*/
dev_warn(&pci->dev, "GPU sound probed, but not operational: please add a quirk to driver_denylist\n");
pm_runtime_disable(&pci->dev);
pm_runtime_set_suspended(&pci->dev);
pm_runtime_enable(&pci->dev);
}
}
if (bus->chip_init) {
@@ -2071,6 +2085,27 @@ static const struct pci_device_id driver_denylist[] = {
{}
};
static struct pci_device_id driver_denylist_ideapad_z570[] = {
{ PCI_DEVICE_SUB(0x10de, 0x0bea, 0x0000, 0x0000) }, /* NVIDIA GF108 HDA */
{}
};
/* DMI-based denylist, to be used when:
* - PCI subsystem IDs are zero, impossible to distinguish from valid sound cards.
* - Different modifications of the same laptop use different GPU models.
*/
static const struct dmi_system_id driver_denylist_dmi[] = {
{
/* No HDA in NVIDIA DGPU. BIOS disables it, but quirk_nvidia_hda() reenables. */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
},
.driver_data = &driver_denylist_ideapad_z570,
},
{}
};
static const struct hda_controller_ops pci_hda_ops = {
.disable_msi_reset_irq = disable_msi_reset_irq,
.position_check = azx_position_check,
@@ -2081,6 +2116,7 @@ static DECLARE_BITMAP(probed_devs, SNDRV_CARDS);
static int azx_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
const struct dmi_system_id *dmi;
struct snd_card *card;
struct hda_intel *hda;
struct azx *chip;
@@ -2093,6 +2129,12 @@ static int azx_probe(struct pci_dev *pci,
return -ENODEV;
}
dmi = dmi_first_match(driver_denylist_dmi);
if (dmi && pci_match_id(dmi->driver_data, pci)) {
dev_info(&pci->dev, "Skipping the device on the DMI denylist\n");
return -ENODEV;
}
dev = find_first_zero_bit(probed_devs, SNDRV_CARDS);
if (dev >= SNDRV_CARDS)
return -ENODEV;

View File

@@ -11,6 +11,7 @@
#define ACP_DEVICE_ID 0x15E2
#define ACP63_REG_START 0x1240000
#define ACP63_REG_END 0x125C000
#define ACP63_PCI_REV 0x63
#define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK 0x00010001
#define ACP_PGFSM_CNTL_POWER_ON_MASK 1

View File

@@ -559,7 +559,7 @@ static int snd_acp63_probe(struct pci_dev *pci,
/* Pink Sardine device check */
switch (pci->revision) {
case 0x63:
case ACP63_PCI_REV:
break;
default:
dev_dbg(&pci->dev, "acp63 pci device not found\n");

View File

@@ -339,6 +339,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83J2"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
@@ -584,6 +591,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "pang13"),
}
},
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7UCX"),
}
},
{}
};

View File

@@ -488,11 +488,17 @@ static int fsl_audmix_probe(struct platform_device *pdev)
goto err_disable_pm;
}
priv->pdev = platform_device_register_data(dev, "imx-audmix", 0, NULL, 0);
if (IS_ERR(priv->pdev)) {
ret = PTR_ERR(priv->pdev);
dev_err(dev, "failed to register platform: %d\n", ret);
goto err_disable_pm;
/*
* If dais property exist, then register the imx-audmix card driver.
* otherwise, it should be linked by audio graph card.
*/
if (of_find_property(pdev->dev.of_node, "dais", NULL)) {
priv->pdev = platform_device_register_data(dev, "imx-audmix", 0, NULL, 0);
if (IS_ERR(priv->pdev)) {
ret = PTR_ERR(priv->pdev);
dev_err(dev, "failed to register platform: %d\n", ret);
goto err_disable_pm;
}
}
return 0;

View File

@@ -1269,8 +1269,8 @@ static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_s
struct snd_sof_tuple *new_tuples;
num_tuples += token_list[object_token_list[i]].count * (num_sets - 1);
new_tuples = krealloc(swidget->tuples,
sizeof(*new_tuples) * num_tuples, GFP_KERNEL);
new_tuples = krealloc_array(swidget->tuples,
num_tuples, sizeof(*new_tuples), GFP_KERNEL);
if (!new_tuples) {
ret = -ENOMEM;
goto err;

View File

@@ -489,16 +489,84 @@ static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
/*
* CME protocol: like the standard protocol, but SysEx commands are sent as a
* single USB packet preceded by a 0x0F byte.
* single USB packet preceded by a 0x0F byte, as are system realtime
* messages and MIDI Active Sensing.
* Also, multiple messages can be sent in the same packet.
*/
static void snd_usbmidi_cme_input(struct snd_usb_midi_in_endpoint *ep,
uint8_t *buffer, int buffer_length)
{
if (buffer_length < 2 || (buffer[0] & 0x0f) != 0x0f)
snd_usbmidi_standard_input(ep, buffer, buffer_length);
else
snd_usbmidi_input_data(ep, buffer[0] >> 4,
&buffer[1], buffer_length - 1);
int remaining = buffer_length;
/*
	 * CME sends sysex, song position pointer, system realtime
* and active sensing using CIN 0x0f, which in the standard
* is only intended for single byte unparsed data.
* So we need to interpret these here before sending them on.
* By default, we assume single byte data, which is true
* for system realtime (midi clock, start, stop and continue)
* and active sensing, and handle the other (known) cases
* separately.
* In contrast to the standard, CME does not split sysex
* into multiple 4-byte packets, but lumps everything together
* into one. In addition, CME can string multiple messages
* together in the same packet; pressing the Record button
* on an UF6 sends a sysex message directly followed
* by a song position pointer in the same packet.
* For it to have any reasonable meaning, a sysex message
* needs to be at least 3 bytes in length (0xf0, id, 0xf7),
* corresponding to a packet size of 4 bytes, and the ones sent
* by CME devices are 6 or 7 bytes, making the packet fragments
* 7 or 8 bytes long (six or seven bytes plus preceding CN+CIN byte).
* For the other types, the packet size is always 4 bytes,
* as per the standard, with the data size being 3 for SPP
* and 1 for the others.
* Thus all packet fragments are at least 4 bytes long, so we can
	 * skip anything that is shorter; this also conveniently skips
* packets with size 0, which CME devices continuously send when
* they have nothing better to do.
* Another quirk is that sometimes multiple messages are sent
* in the same packet. This has been observed for midi clock
* and active sensing i.e. 0x0f 0xf8 0x00 0x00 0x0f 0xfe 0x00 0x00,
* but also multiple note ons/offs, and control change together
* with MIDI clock. Similarly, some sysex messages are followed by
* the song position pointer in the same packet, and occasionally
* additionally by a midi clock or active sensing.
* We handle this by looping over all data and parsing it along the way.
*/
while (remaining >= 4) {
int source_length = 4; /* default */
if ((buffer[0] & 0x0f) == 0x0f) {
int data_length = 1; /* default */
if (buffer[1] == 0xf0) {
/* Sysex: Find EOX and send on whole message. */
/* To kick off the search, skip the first
				 * two bytes (CN+CIN and SysEx (0xf0)).
*/
uint8_t *tmp_buf = buffer + 2;
int tmp_length = remaining - 2;
while (tmp_length > 1 && *tmp_buf != 0xf7) {
tmp_buf++;
tmp_length--;
}
data_length = tmp_buf - buffer;
source_length = data_length + 1;
} else if (buffer[1] == 0xf2) {
/* Three byte song position pointer */
data_length = 3;
}
snd_usbmidi_input_data(ep, buffer[0] >> 4,
&buffer[1], data_length);
} else {
/* normal channel events */
snd_usbmidi_standard_input(ep, buffer, source_length);
}
buffer += source_length;
remaining -= source_length;
}
}
/*

View File

@@ -120,6 +120,10 @@ out_dir:
struct config *prepare_default_config()
{
struct config *config = malloc(sizeof(struct config));
if (!config) {
perror("malloc");
return NULL;
}
dprintf("loading defaults\n");