Merge 6.12.24 into android16-6.12-lts

GKI (arm64) relevant 98 out of 394 changes, affecting 131 files +1443/-762
  40bc55e4fc cgroup/cpuset: Fix incorrect isolated_cpus update in update_parent_effective_cpumask() [1 file, +3/-3]
  9701dcbf5f cgroup/cpuset: Fix error handling in remote_partition_disable() [1 file, +20/-9]
  2dbd1b1660 cgroup/cpuset: Revert "Allow suppression of sched domain rebuild in update_cpumasks_hier()" [1 file, +14/-25]
  6b145f8b22 cgroup/cpuset: Enforce at most one rebuild_sched_domains_locked() call per operation [1 file, +33/-16]
  1b06f00eda cgroup/cpuset: Further optimize code if CONFIG_CPUSETS_V1 not set [1 file, +19/-20]
  cdb6e724e7 cgroup/cpuset: Fix race between newly created partition and dying one [4 files, +25/-4]
  179ef2f810 gpiolib: of: Fix the choice for Ingenic NAND quirk [1 file, +2/-0]
  cb8372e54f ublk: refactor recovery configuration flag helpers [1 file, +42/-20]
  caa5c8a235 ublk: fix handling recovery & reissue in ublk_abort_queue() [1 file, +26/-4]
  7c5957f790 tipc: fix memory leak in tipc_link_xmit [1 file, +1/-0]
  4d55144b12 codel: remove sch->q.qlen check before qdisc_tree_reduce_backlog() [2 files, +3/-8]
  b2f3c3d57a tc: Ensure we have enough buffer space when sending filter netlink notifications [1 file, +45/-21]
  a065b99605 net: ethtool: Don't call .cleanup_data when prepare_data fails [1 file, +5/-3]
  70449ca406 net_sched: sch_sfq: use a temporary work area for validating configuration [1 file, +44/-12]
  f86293adce net_sched: sch_sfq: move the limit validation [1 file, +6/-4]
  6d98cd6342 net: phy: move phy_link_change() prior to mdio_bus_phy_may_suspend() [1 file, +13/-13]
  a6ed6f8ec8 net: phy: allow MDIO bus PM ops to start/stop state machine for phylink-controlled PHY [1 file, +29/-2]
  cc16f7402a ipv6: Align behavior across nexthops during path selection [1 file, +4/-4]
  c61feda373 perf/core: Add aux_pause, aux_resume, aux_start_paused [4 files, +110/-5]
  7ef5aa081f perf/core: Simplify the perf_event_alloc() error path [2 files, +78/-76]
  fa1827fa96 perf: Fix hang while freeing sigtrap event [2 files, +18/-47]
  52535688c2 fs: consistently deref the files table with rcu_dereference_raw() [1 file, +17/-9]
  67e85cfa95 umount: Allow superblock owners to force umount [1 file, +2/-1]
  1b3ebfb15d perf: arm_pmu: Don't disable counter in armpmu_add() [1 file, +3/-5]
  11ae4fec1f PM: hibernate: Avoid deadlock in hibernate_compressor_param_set() [1 file, +3/-3]
  ead1fc9f93 Flush console log from kernel_power_off() [3 files, +8/-3]
  cb58e90920 arm64: cputype: Add QCOM_CPU_PART_KRYO_3XX_GOLD [1 file, +2/-0]
  3c057a4904 media: uvcvideo: Add quirk for Actions UVC05 [1 file, +9/-0]
  cb1c6cb110 ALSA: usb-audio: Fix CME quirk for UF series keyboards [1 file, +74/-6]
  a6bf0fd322 net: page_pool: don't cast mp param to devmem [1 file, +1/-1]
  c6e50cb8bf f2fs: don't retry IO for corrupted data scenario [1 file, +4/-0]
  de94d0ca9e net: usb: asix_devices: add FiberGecko DeviceID [1 file, +17/-0]
  7204335d19 page_pool: avoid infinite loop to schedule delayed worker [1 file, +7/-1]
  ecc4613316 f2fs: fix to avoid out-of-bounds access in f2fs_truncate_inode_blocks() [1 file, +8/-1]
  5f815757e6 ext4: protect ext4_release_dquot against freezing [1 file, +17/-0]
  aa39d45071 Revert "f2fs: rebuild nat_bits during umount" [3 files, +59/-95]
  eb59cc31b6 ext4: ignore xattrs past end [1 file, +10/-1]
  a8a8076210 cdc_ether|r8152: ThinkPad Hybrid USB-C/A Dock quirk [3 files, +19/-0]
  299d7d27af net: vlan: don't propagate flags on open [1 file, +4/-27]
  40c70ff44b tracing: fix return value in __ftrace_event_enable_disable for TRACE_REG_UNREGISTER [1 file, +3/-1]
  6b7a32fa9b Bluetooth: hci_uart: fix race during initialization [1 file, +2/-1]
  fe6f1f349d Bluetooth: hci_qca: use the power sequencer for wcn6750 [1 file, +1/-1]
  feed98579d Bluetooth: qca: simplify WCN399x NVM loading [1 file, +6/-7]
  035e1bffc0 Bluetooth: Add quirk for broken READ_VOICE_SETTING [3 files, +15/-0]
  09246dfb5c Bluetooth: Add quirk for broken READ_PAGE_SCAN_TYPE [2 files, +10/-1]
  044c1b3528 drm: allow encoder mode_set even when connectors change for crtc [1 file, +1/-1]
  df33b535f0 drm: panel-orientation-quirks: Add support for AYANEO 2S [1 file, +2/-2]
  6fe4ed94ee drm: panel-orientation-quirks: Add quirks for AYA NEO Flip DS and KB [1 file, +18/-0]
  5dd6fdb889 drm: panel-orientation-quirks: Add quirk for AYA NEO Slide [1 file, +6/-0]
  a64e097426 drm: panel-orientation-quirks: Add new quirk for GPD Win 2 [1 file, +6/-0]
  ba5a998f84 drm: panel-orientation-quirks: Add quirk for OneXPlayer Mini (Intel) [1 file, +12/-0]
  f04612890c drm/debugfs: fix printk format for bridge index [1 file, +1/-1]
  b22cb42a5e drm/bridge: panel: forbid initializing a panel with unknown connector type [1 file, +4/-1]
  1c38108a49 drivers: base: devres: Allow to release group on device release [1 file, +7/-0]
  8feefd106a PCI: Enable Configuration RRS SV early [1 file, +5/-3]
  73d2b96250 PCI: Check BAR index for validity [4 files, +57/-10]
  9a6be23eb0 tracing: probe-events: Add comments about entry data storing code [1 file, +28/-0]
  7b9bdd7059 erofs: set error to bio if file-backed IO fails [1 file, +2/-0]
  806908d5d9 bpf: support SKF_NET_OFF and SKF_LL_OFF on skb frags [1 file, +44/-36]
  dd3edffae8 ext4: don't treat fhandle lookup of ea_inode as FS corruption [1 file, +48/-20]
  2ff58c5b26 arm64: cputype: Add MIDR_CORTEX_A76AE [1 file, +2/-0]
  4af2858435 arm64: errata: Add QCOM_KRYO_4XX_GOLD to the spectre_bhb_k24_list [1 file, +1/-0]
  3b0f2526c8 arm64: errata: Assume that unknown CPUs _are_ vulnerable to Spectre BHB [2 files, +102/-102]
  20c105f587 arm64: errata: Add KRYO 2XX/3XX/4XX silver cores to Spectre BHB safe list [1 file, +3/-0]
  c322789613 KVM: arm64: Tear down vGIC on failed vCPU creation [1 file, +5/-1]
  baea1762cd media: v4l2-dv-timings: prevent possible overflow in v4l2_detect_gtf() [1 file, +2/-2]
  0828d6e9ad io_uring/net: fix accept multishot handling [1 file, +2/-0]
  b7c6d081c1 io_uring/net: fix io_req_post_cqe abuse by send bundle [3 files, +6/-2]
  3e0356857e io_uring/kbuf: reject zero sized provided buffers [1 file, +2/-0]
  16d9067f00 ext4: fix off-by-one error in do_split [1 file, +1/-1]
  a1dde7457d f2fs: fix to avoid atomicity corruption of atomic file [2 files, +5/-3]
  e6bba32857 i3c: Add NULL pointer check in i3c_master_queue_ibi() [1 file, +3/-0]
  9eaec071f1 jbd2: remove wrong sb->s_sequence check [1 file, +0/-1]
  eec737e17e arm64: mops: Do not dereference src reg for a set operation [1 file, +2/-2]
  1dd288783d arm64: mm: Correct the update of max_pfn [1 file, +2/-1]
  5f7f6abd92 net: Fix null-ptr-deref by sock_lock_init_class_and_name() and rmmod. [2 files, +43/-2]
  53dc6b00c0 mm/rmap: reject hugetlb folios in folio_make_device_exclusive() [1 file, +1/-1]
  83b6b5061e mm: make page_mapped_in_vma() hugetlb walk aware [1 file, +9/-4]
  6dd8d9440f mm: fix lazy mmu docs and usage [1 file, +8/-6]
  2532df0a9b mm/mremap: correctly handle partial mremap() of VMA starting at 0 [1 file, +5/-5]
  cc98577f91 mm: add missing release barrier on PGDAT_RECLAIM_LOCKED unlock [1 file, +1/-1]
  14936034de mm/userfaultfd: fix release hang over concurrent GUP [1 file, +25/-26]
  65b259e3e0 mm/hwpoison: introduce folio_contain_hwpoisoned_page() helper [3 files, +8/-4]
  9e7c37fadb sctp: detect and prevent references to a freed transport in sendmsg [3 files, +18/-9]
  474b3194c8 tracing: Do not add length to print format in synthetic events [1 file, +0/-1]
  74f01c2ca8 dm-verity: fix prefetch-vs-suspend race [1 file, +8/-0]
  fae0a8796c KVM: Allow building irqbypass.ko as as module when kvm.ko is a module [3 files, +7/-7]
  dc83eccc93 of/irq: Fix device node refcount leakage in API of_irq_parse_one() [1 file, +27/-32]
  3540164c75 of/irq: Fix device node refcount leakage in API of_irq_parse_raw() [1 file, +8/-0]
  29cb94963c of/irq: Fix device node refcount leakages in of_irq_count() [1 file, +3/-1]
  d0f25a9977 of/irq: Fix device node refcount leakage in API irq_of_parse_and_map() [1 file, +5/-1]
  712d84459a of/irq: Fix device node refcount leakages in of_irq_init() [1 file, +3/-0]
  d69ad6e1a5 PCI: Fix reference leak in pci_alloc_child_bus() [1 file, +4/-1]
  9707d0c932 PCI: Fix reference leak in pci_register_host_bridge() [1 file, +7/-2]
  869202291a PCI: Fix wrong length of devres array [1 file, +1/-1]
  92ca7270fe ring-buffer: Use flush_kernel_vmap_range() over flush_dcache_folio() [1 file, +3/-2]
  9ca4fe3574 arm64: errata: Add newer ARM cores to the spectre_bhb_loop_affected() lists [1 file, +14/-1]
  281782d2c6 Bluetooth: hci_uart: Fix another race during initialization [2 files, +15/-6]

Changes in 6.12.24
	ASoC: Intel: adl: add 2xrt1316 audio configuration
	cgroup/cpuset: Fix incorrect isolated_cpus update in update_parent_effective_cpumask()
	cgroup/cpuset: Fix error handling in remote_partition_disable()
	cgroup/cpuset: Revert "Allow suppression of sched domain rebuild in update_cpumasks_hier()"
	cgroup/cpuset: Enforce at most one rebuild_sched_domains_locked() call per operation
	cgroup/cpuset: Further optimize code if CONFIG_CPUSETS_V1 not set
	cgroup/cpuset: Fix race between newly created partition and dying one
	gpiolib: of: Fix the choice for Ingenic NAND quirk
	selftests/futex: futex_waitv wouldblock test should fail
	ublk: refactor recovery configuration flag helpers
	ublk: fix handling recovery & reissue in ublk_abort_queue()
	drm/i915: Disable RPG during live selftest
	x86/acpi: Don't limit CPUs to 1 for Xen PV guests due to disabled ACPI
	drm/xe/hw_engine: define sysfs_ops on all directories
	ata: pata_pxa: Fix potential NULL pointer dereference in pxa_ata_probe()
	objtool: Fix INSN_CONTEXT_SWITCH handling in validate_unret()
	tipc: fix memory leak in tipc_link_xmit
	codel: remove sch->q.qlen check before qdisc_tree_reduce_backlog()
	net: tls: explicitly disallow disconnect
	octeontx2-pf: qos: fix VF root node parent queue index
	tc: Ensure we have enough buffer space when sending filter netlink notifications
	net: ethtool: Don't call .cleanup_data when prepare_data fails
	drm/tests: modeset: Fix drm_display_mode memory leak
	drm/tests: helpers: Create kunit helper to destroy a drm_display_mode
	drm/tests: cmdline: Fix drm_display_mode memory leak
	drm/tests: modes: Fix drm_display_mode memory leak
	drm/tests: probe-helper: Fix drm_display_mode memory leak
	net: libwx: handle page_pool_dev_alloc_pages error
	ata: sata_sx4: Add error handling in pdc20621_i2c_read()
	drm/i915/huc: Fix fence not released on early probe errors
	nvmet-fcloop: swap list_add_tail arguments
	net_sched: sch_sfq: use a temporary work area for validating configuration
	net_sched: sch_sfq: move the limit validation
	smb: client: fix UAF in decryption with multichannel
	net: phy: move phy_link_change() prior to mdio_bus_phy_may_suspend()
	net: phy: allow MDIO bus PM ops to start/stop state machine for phylink-controlled PHY
	ipv6: Align behavior across nexthops during path selection
	net: ppp: Add bound checking for skb data on ppp_sync_txmung
	nft_set_pipapo: fix incorrect avx2 match of 5th field octet
	iommu/exynos: Fix suspend/resume with IDENTITY domain
	iommu/mediatek: Fix NULL pointer deference in mtk_iommu_device_group
	perf/core: Add aux_pause, aux_resume, aux_start_paused
	perf/core: Simplify the perf_event_alloc() error path
	perf: Fix hang while freeing sigtrap event
	fs: consistently deref the files table with rcu_dereference_raw()
	umount: Allow superblock owners to force umount
	pm: cpupower: bench: Prevent NULL dereference on malloc failure
	x86/mm: Clear _PAGE_DIRTY for kernel mappings when we clear _PAGE_RW
	x86/percpu: Disable named address spaces for UBSAN_BOOL with KASAN for GCC < 14.2
	x86/ia32: Leave NULL selector values 0~3 unchanged
	x86/cpu: Don't clear X86_FEATURE_LAHF_LM flag in init_amd_k8() on AMD when running in a virtual machine
	perf: arm_pmu: Don't disable counter in armpmu_add()
	perf/dwc_pcie: fix some unreleased resources
	PM: hibernate: Avoid deadlock in hibernate_compressor_param_set()
	Flush console log from kernel_power_off()
	arm64: cputype: Add QCOM_CPU_PART_KRYO_3XX_GOLD
	xen/mcelog: Add __nonstring annotations for unterminated strings
	zstd: Increase DYNAMIC_BMI2 GCC version cutoff from 4.8 to 11.0 to work around compiler segfault
	platform/chrome: cros_ec_lpc: Match on Framework ACPI device
	ASoC: SOF: topology: Use krealloc_array() to replace krealloc()
	HID: pidff: Convert infinite length from Linux API to PID standard
	HID: pidff: Do not send effect envelope if it's empty
	HID: pidff: Add MISSING_DELAY quirk and its detection
	HID: pidff: Add MISSING_PBO quirk and its detection
	HID: pidff: Add PERMISSIVE_CONTROL quirk
	HID: pidff: Add hid_pidff_init_with_quirks and export as GPL symbol
	HID: pidff: Add FIX_WHEEL_DIRECTION quirk
	HID: Add hid-universal-pidff driver and supported device ids
	HID: pidff: Add PERIODIC_SINE_ONLY quirk
	HID: pidff: Fix null pointer dereference in pidff_find_fields
	ASoC: amd: ps: use macro for ACP6.3 pci revision id
	ALSA: hda: intel: Fix Optimus when GPU has no sound
	ALSA: hda: intel: Add Lenovo IdeaPad Z570 to probe denylist
	ASoC: fsl_audmix: register card device depends on 'dais' property
	media: uvcvideo: Add quirk for Actions UVC05
	media: s5p-mfc: Corrected NV12M/NV21M plane-sizes
	mmc: dw_mmc: add a quirk for accessing 64-bit FIFOs in two halves
	ALSA: usb-audio: Fix CME quirk for UF series keyboards
	ASoC: amd: Add DMI quirk for ACP6X mic support
	ASoC: amd: yc: update quirk data for new Lenovo model
	platform/x86: x86-android-tablets: Add select POWER_SUPPLY to Kconfig
	wifi: ath11k: Fix DMA buffer allocation to resolve SWIOTLB issues
	wifi: ath11k: fix memory leak in ath11k_xxx_remove()
	wifi: ath12k: fix memory leak in ath12k_pci_remove()
	wifi: ath12k: Fix invalid entry fetch in ath12k_dp_mon_srng_process
	ata: libata-core: Add 'external' to the libata.force kernel parameter
	scsi: mpi3mr: Avoid reply queue full condition
	scsi: mpi3mr: Synchronous access b/w reset and tm thread for reply queue
	net: page_pool: don't cast mp param to devmem
	f2fs: don't retry IO for corrupted data scenario
	wifi: mac80211: add strict mode disabling workarounds
	wifi: mac80211: ensure sdata->work is canceled before initialized.
	scsi: target: spc: Fix RSOC parameter data header size
	net: usb: asix_devices: add FiberGecko DeviceID
	page_pool: avoid infinite loop to schedule delayed worker
	can: flexcan: Add quirk to handle separate interrupt lines for mailboxes
	can: flexcan: add NXP S32G2/S32G3 SoC support
	jfs: Fix uninit-value access of imap allocated in the diMount() function
	fs/jfs: cast inactags to s64 to prevent potential overflow
	fs/jfs: Prevent integer overflow in AG size calculation
	jfs: Prevent copying of nlink with value 0 from disk inode
	jfs: add sanity check for agwidth in dbMount
	ata: libata-eh: Do not use ATAPI DMA for a device limited to PIO mode
	net: sfp: add quirk for 2.5G OEM BX SFP
	wifi: ath12k: Fix invalid data access in ath12k_dp_rx_h_undecap_nwifi
	f2fs: fix to avoid out-of-bounds access in f2fs_truncate_inode_blocks()
	net: sfp: add quirk for FS SFP-10GM-T copper SFP+ module
	ahci: add PCI ID for Marvell 88SE9215 SATA Controller
	ext4: protect ext4_release_dquot against freezing
	Revert "f2fs: rebuild nat_bits during umount"
	ext4: ignore xattrs past end
	cdc_ether|r8152: ThinkPad Hybrid USB-C/A Dock quirk
	scsi: st: Fix array overflow in st_setup()
	ahci: Marvell 88SE9215 controllers prefer DMA for ATAPI
	btrfs: harden block_group::bg_list against list_del() races
	wifi: mt76: mt76x2u: add TP-Link TL-WDN6200 ID to device table
	net: vlan: don't propagate flags on open
	tracing: fix return value in __ftrace_event_enable_disable for TRACE_REG_UNREGISTER
	Bluetooth: btintel_pcie: Add device id of Whale Peak
	Bluetooth: hci_uart: fix race during initialization
	Bluetooth: btusb: Add 2 HWIDs for MT7922
	Bluetooth: hci_qca: use the power sequencer for wcn6750
	Bluetooth: qca: simplify WCN399x NVM loading
	Bluetooth: Add quirk for broken READ_VOICE_SETTING
	Bluetooth: Add quirk for broken READ_PAGE_SCAN_TYPE
	drm: allow encoder mode_set even when connectors change for crtc
	drm/xe/bmg: Add new PCI IDs
	drm/xe/vf: Don't try to trigger a full GT reset if VF
	drm/amd/display: Update Cursor request mode to the beginning prefetch always
	drm/amdgpu: Unlocked unmap only clear page table leaves
	drm: panel-orientation-quirks: Add support for AYANEO 2S
	drm: panel-orientation-quirks: Add quirks for AYA NEO Flip DS and KB
	drm: panel-orientation-quirks: Add quirk for AYA NEO Slide
	drm: panel-orientation-quirks: Add new quirk for GPD Win 2
	drm: panel-orientation-quirks: Add quirk for OneXPlayer Mini (Intel)
	drm/debugfs: fix printk format for bridge index
	drm/bridge: panel: forbid initializing a panel with unknown connector type
	drm/amd/display: stop DML2 from removing pipes based on planes
	drivers: base: devres: Allow to release group on device release
	drm/amdkfd: clamp queue size to minimum
	drm/amdkfd: Fix mode1 reset crash issue
	drm/amdkfd: Fix pqm_destroy_queue race with GPU reset
	drm/amdkfd: debugfs hang_hws skip GPU with MES
	drm/xe/xelp: Move Wa_16011163337 from tunings to workarounds
	drm/mediatek: mtk_dpi: Move the input_2p_en bit to platform data
	drm/mediatek: mtk_dpi: Explicitly manage TVD clock in power on/off
	PCI: Add Rockchip Vendor ID
	drm/amdgpu: handle amdgpu_cgs_create_device() errors in amd_powerplay_create()
	PCI: Enable Configuration RRS SV early
	drm/amdgpu: Fix the race condition for draining retry fault
	PCI: Check BAR index for validity
	PCI: vmd: Make vmd_dev::cfg_lock a raw_spinlock_t type
	drm/amdgpu: grab an additional reference on the gang fence v2
	fbdev: omapfb: Add 'plane' value check
	tracing: probe-events: Add comments about entry data storing code
	ktest: Fix Test Failures Due to Missing LOG_FILE Directories
	tpm, tpm_tis: Workaround failed command reception on Infineon devices
	tpm: End any active auth session before shutdown
	pwm: mediatek: Prevent divide-by-zero in pwm_mediatek_config()
	pwm: rcar: Improve register calculation
	pwm: fsl-ftm: Handle clk_get_rate() returning 0
	erofs: set error to bio if file-backed IO fails
	bpf: support SKF_NET_OFF and SKF_LL_OFF on skb frags
	ext4: don't treat fhandle lookup of ea_inode as FS corruption
	s390/pci: Fix s390_mmio_read/write syscall page fault handling
	HID: pidff: Clamp PERIODIC effect period to device's logical range
	HID: pidff: Stop all effects before enabling actuators
	HID: pidff: Completely rework and fix pidff_reset function
	HID: pidff: Simplify pidff_upload_effect function
	HID: pidff: Define values used in pidff_find_special_fields
	HID: pidff: Rescale time values to match field units
	HID: pidff: Factor out code for setting gain
	HID: pidff: Move all hid-pidff definitions to a dedicated header
	HID: pidff: Simplify pidff_rescale_signed
	HID: pidff: Use macros instead of hardcoded min/max values for shorts
	HID: pidff: Factor out pool report fetch and remove excess declaration
	HID: pidff: Make sure to fetch pool before checking SIMULTANEOUS_MAX
	HID: hid-universal-pidff: Add Asetek wheelbases support
	HID: pidff: Comment and code style update
	HID: pidff: Support device error response from PID_BLOCK_LOAD
	HID: pidff: Remove redundant call to pidff_find_special_keys
	HID: pidff: Rename two functions to align them with naming convention
	HID: pidff: Clamp effect playback LOOP_COUNT value
	HID: pidff: Compute INFINITE value instead of using hardcoded 0xffff
	HID: pidff: Fix 90 degrees direction name North -> East
	HID: pidff: Fix set_device_control()
	auxdisplay: hd44780: Fix an API misuse in hd44780.c
	dt-bindings: media: st,stmipid02: correct lane-polarities maxItems
	media: mediatek: vcodec: Fix a resource leak related to the scp device in FW initialization
	media: mtk-vcodec: venc: avoid -Wenum-compare-conditional warning
	media: uapi: rkisp1-config: Fix typo in extensible params example
	media: mgb4: Fix CMT registers update logic
	media: i2c: adv748x: Fix test pattern selection mask
	media: mgb4: Fix switched CMT frequency range "magic values" sets
	media: intel/ipu6: set the dev_parent of video device to pdev
	media: venus: hfi: add a check to handle OOB in sfr region
	media: venus: hfi: add check to handle incorrect queue size
	media: vim2m: print device name after registering device
	media: siano: Fix error handling in smsdvb_module_init()
	media: rockchip: rga: fix rga offset lookup
	xenfs/xensyms: respect hypervisor's "next" indication
	arm64: cputype: Add MIDR_CORTEX_A76AE
	arm64: errata: Add QCOM_KRYO_4XX_GOLD to the spectre_bhb_k24_list
	arm64: errata: Assume that unknown CPUs _are_ vulnerable to Spectre BHB
	arm64: errata: Add KRYO 2XX/3XX/4XX silver cores to Spectre BHB safe list
	KVM: arm64: Tear down vGIC on failed vCPU creation
	spi: cadence-qspi: Fix probe on AM62A LP SK
	mtd: rawnand: brcmnand: fix PM resume warning
	tpm, tpm_tis: Fix timeout handling when waiting for TPM status
	accel/ivpu: Fix PM related deadlocks in MS IOCTLs
	media: streamzap: prevent processing IR data on URB failure
	media: hi556: Fix memory leak (on error) in hi556_check_hwcfg()
	media: visl: Fix ERANGE error when setting enum controls
	media: platform: stm32: Add check for clk_enable()
	media: imx219: Adjust PLL settings based on the number of MIPI lanes
	media: v4l2-dv-timings: prevent possible overflow in v4l2_detect_gtf()
	Revert "media: imx214: Fix the error handling in imx214_probe()"
	media: i2c: ccs: Set the device's runtime PM status correctly in remove
	media: i2c: ccs: Set the device's runtime PM status correctly in probe
	media: i2c: ov7251: Set enable GPIO low in probe
	media: i2c: ov7251: Introduce 1 ms delay between regulators and en GPIO
	media: nuvoton: Fix reference handling of ece_node
	media: nuvoton: Fix reference handling of ece_pdev
	media: venus: hfi_parser: add check to avoid out of bound access
	media: venus: hfi_parser: refactor hfi packet parsing logic
	media: i2c: imx319: Rectify runtime PM handling probe and remove
	media: i2c: imx219: Rectify runtime PM handling in probe and remove
	media: i2c: imx214: Rectify probe error handling related to runtime PM
	media: chips-media: wave5: Fix gray color on screen
	media: chips-media: wave5: Avoid race condition in the interrupt handler
	media: chips-media: wave5: Fix a hang after seeking
	media: chips-media: wave5: Fix timeout while testing 10bit hevc fluster
	mptcp: sockopt: fix getting IPV6_V6ONLY
	mptcp: sockopt: fix getting freebind & transparent
	mtd: Add check for devm_kcalloc()
	net: dsa: mv88e6xxx: workaround RGMII transmit delay erratum for 6320 family
	net: dsa: mv88e6xxx: fix internal PHYs for 6320 family
	mtd: Replace kcalloc() with devm_kcalloc()
	clocksource/drivers/stm32-lptimer: Use wakeup capable instead of init wakeup
	wifi: mt76: Add check for devm_kstrdup()
	wifi: mac80211: fix integer overflow in hwmp_route_info_get()
	wifi: mt76: mt7925: ensure wow pattern command align fw format
	wifi: mt76: mt7925: fix country count limitation for CLC
	wifi: mt76: mt7925: fix the wrong link_idx when a p2p_device is present
	wifi: mt76: mt7925: fix the wrong simultaneous cap for MLO
	io_uring/net: fix accept multishot handling
	io_uring/net: fix io_req_post_cqe abuse by send bundle
	io_uring/kbuf: reject zero sized provided buffers
	ASoC: codecs: wcd937x: fix a potential memory leak in wcd937x_soc_codec_probe()
	ASoC: q6apm: add q6apm_get_hw_pointer helper
	ASoC: q6apm-dai: schedule all available frames to avoid dsp under-runs
	ASoC: q6apm-dai: make use of q6apm_get_hw_pointer
	ASoC: qdsp6: q6apm-dai: set 10 ms period and buffer alignment.
	ASoC: qdsp6: q6apm-dai: fix capture pipeline overruns.
	ASoC: qdsp6: q6asm-dai: fix q6asm_dai_compr_set_params error path
	ALSA: hda/realtek: Enable Mute LED on HP OMEN 16 Laptop xd000xx
	accel/ivpu: Fix warning in ivpu_ipc_send_receive_internal()
	accel/ivpu: Fix deadlock in ivpu_ms_cleanup()
	bus: mhi: host: Fix race between unprepare and queue_buf
	ext4: fix off-by-one error in do_split
	f2fs: fix to avoid atomicity corruption of atomic file
	vdpa/mlx5: Fix oversized null mkey longer than 32bit
	udf: Fix inode_getblk() return value
	tpm: do not start chip while suspended
	svcrdma: do not unregister device for listeners
	soc: samsung: exynos-chipid: Add NULL pointer check in exynos_chipid_probe()
	smb311 client: fix missing tcon check when mounting with linux/posix extensions
	ima: limit the number of open-writers integrity violations
	ima: limit the number of ToMToU integrity violations
	i3c: master: svc: Use readsb helper for reading MDB
	i3c: Add NULL pointer check in i3c_master_queue_ibi()
	jbd2: remove wrong sb->s_sequence check
	kbuild: exclude .rodata.(cst|str)* when building ranges
	leds: rgb: leds-qcom-lpg: Fix pwm resolution max for Hi-Res PWMs
	leds: rgb: leds-qcom-lpg: Fix calculation of best period Hi-Res PWMs
	mfd: ene-kb3930: Fix a potential NULL pointer dereference
	mailbox: tegra-hsp: Define dimensioning masks in SoC data
	locking/lockdep: Decrease nr_unused_locks if lock unused in zap_class()
	lib: scatterlist: fix sg_split_phys to preserve original scatterlist offsets
	mptcp: fix NULL pointer in can_accept_new_subflow
	mptcp: only inc MPJoinAckHMacFailure for HMAC failures
	mtd: inftlcore: Add error check for inftl_read_oob()
	mtd: rawnand: Add status chack in r852_ready()
	arm64: mops: Do not dereference src reg for a set operation
	arm64: tegra: Remove the Orin NX/Nano suspend key
	arm64: mm: Correct the update of max_pfn
	arm64: dts: mediatek: mt8173: Fix disp-pwm compatible string
	arm64: dts: exynos: gs101: disable pinctrl_gsacore node
	backlight: led_bl: Hold led_access lock when calling led_sysfs_disable()
	btrfs: fix non-empty delayed iputs list on unmount due to compressed write workers
	btrfs: tests: fix chunk map leak after failure to add it to the tree
	btrfs: zoned: fix zone activation with missing devices
	btrfs: zoned: fix zone finishing with missing devices
	iommufd: Fix uninitialized rc in iommufd_access_rw()
	iommu/tegra241-cmdqv: Fix warnings due to dmam_free_coherent()
	iommu/vt-d: Put IRTE back into posted MSI mode if vCPU posting is disabled
	iommu/vt-d: Don't clobber posted vCPU IRTE when host IRQ affinity changes
	iommu/vt-d: Fix possible circular locking dependency
	iommu/vt-d: Wire up irq_ack() to irq_move_irq() for posted MSIs
	sparc/mm: disable preemption in lazy mmu mode
	sparc/mm: avoid calling arch_enter/leave_lazy_mmu() in set_ptes
	net: Fix null-ptr-deref by sock_lock_init_class_and_name() and rmmod.
	mm/damon/ops: have damon_get_folio return folio even for tail pages
	mm/rmap: reject hugetlb folios in folio_make_device_exclusive()
	mm: make page_mapped_in_vma() hugetlb walk aware
	mm: fix lazy mmu docs and usage
	mm/mremap: correctly handle partial mremap() of VMA starting at 0
	mm: add missing release barrier on PGDAT_RECLAIM_LOCKED unlock
	mm/userfaultfd: fix release hang over concurrent GUP
	mm/hwpoison: do not send SIGBUS to processes with recovered clean pages
	mm/hugetlb: move hugetlb_sysctl_init() to the __init section
	mm/hwpoison: introduce folio_contain_hwpoisoned_page() helper
	sctp: detect and prevent references to a freed transport in sendmsg
	x86/xen: fix balloon target initialization for PVH dom0
	tracing: fprobe events: Fix possible UAF on modules
	tracing: Do not add length to print format in synthetic events
	thermal/drivers/rockchip: Add missing rk3328 mapping entry
	CIFS: Propagate min offload along with other parameters from primary to secondary channels.
	cifs: avoid NULL pointer dereference in dbg call
	cifs: fix integer overflow in match_server()
	cifs: Ensure that all non-client-specific reparse points are processed by the server
	clk: renesas: r9a07g043: Fix HP clock source for RZ/Five
	clk: qcom: clk-branch: Fix invert halt status bit check for votable clocks
	clk: qcom: gdsc: Release pm subdomains in reverse add order
	clk: qcom: gdsc: Capture pm_genpd_add_subdomain result code
	clk: qcom: gdsc: Set retain_ff before moving to HW CTRL
	crypto: ccp - Fix check for the primary ASP device
	crypto: ccp - Fix uAPI definitions of PSP errors
	dlm: fix error if inactive rsb is not hashed
	dlm: fix error if active rsb is not hashed
	dm-ebs: fix prefetch-vs-suspend race
	dm-integrity: set ti->error on memory allocation failure
	dm-integrity: fix non-constant-time tag verification
	dm-verity: fix prefetch-vs-suspend race
	dt-bindings: coresight: qcom,coresight-tpda: Fix too many 'reg'
	dt-bindings: coresight: qcom,coresight-tpdm: Fix too many 'reg'
	ftrace: Add cond_resched() to ftrace_graph_set_hash()
	ftrace: Properly merge notrace hashes
	gpio: tegra186: fix resource handling in ACPI probe path
	gpio: zynq: Fix wakeup source leaks on device unbind
	gve: handle overflow when reporting TX consumed descriptors
	KVM: Allow building irqbypass.ko as as module when kvm.ko is a module
	KVM: PPC: Enable CAP_SPAPR_TCE_VFIO on pSeries KVM guests
	KVM: x86: Explicitly zero-initialize on-stack CPUID unions
	KVM: x86: Acquire SRCU in KVM_GET_MP_STATE to protect guest memory accesses
	landlock: Move code to ease future backports
	landlock: Add the errata interface
	landlock: Add erratum for TCP fix
	landlock: Always allow signals between threads of the same process
	landlock: Prepare to add second errata
	selftests/landlock: Split signal_scoping_threads tests
	selftests/landlock: Add a new test for setuid()
	misc: pci_endpoint_test: Fix displaying 'irq_type' after 'request_irq' error
	net: mana: Switch to page pool for jumbo frames
	ntb: use 64-bit arithmetic for the MSI doorbell mask
	of/irq: Fix device node refcount leakage in API of_irq_parse_one()
	of/irq: Fix device node refcount leakage in API of_irq_parse_raw()
	of/irq: Fix device node refcount leakages in of_irq_count()
	of/irq: Fix device node refcount leakage in API irq_of_parse_and_map()
	of/irq: Fix device node refcount leakages in of_irq_init()
	PCI: brcmstb: Fix missing of_node_put() in brcm_pcie_probe()
	PCI: j721e: Fix the value of .linkdown_irq_regfield for J784S4
	PCI: pciehp: Avoid unnecessary device replacement check
	PCI: Fix reference leak in pci_alloc_child_bus()
	PCI: Fix reference leak in pci_register_host_bridge()
	PCI: Fix wrong length of devres array
	phy: freescale: imx8m-pcie: assert phy reset and perst in power off
	pinctrl: qcom: Clear latched interrupt status when changing IRQ type
	pinctrl: samsung: add support for eint_fltcon_offset
	ring-buffer: Use flush_kernel_vmap_range() over flush_dcache_folio()
	s390/pci: Fix zpci_bus_is_isolated_vf() for non-VFs
	s390/virtio_ccw: Don't allocate/assign airqs for non-existing queues
	s390: Fix linker error when -no-pie option is unavailable
	sched_ext: create_dsq: Return -EEXIST on duplicate request
	selftests: mptcp: close fd_in before returning in main_loop
	selftests: mptcp: fix incorrect fd checks in main_loop
	thermal/drivers/mediatek/lvts: Disable monitor mode during suspend
	thermal/drivers/mediatek/lvts: Disable Stage 3 thermal threshold
	arm64: errata: Add newer ARM cores to the spectre_bhb_loop_affected() lists
	iommufd: Make attach_handle generic than fault specific
	iommufd: Fail replace if device has not been attached
	x86/paravirt: Move halt paravirt calls under CONFIG_PARAVIRT
	ACPI: platform-profile: Fix CFI violation when accessing sysfs files
	NFSD: fix decoding in nfs4_xdr_dec_cb_getattr
	NFSD: Fix CB_GETATTR status fix
	nfsd: don't ignore the return code of svc_proc_register()
	x86/e820: Fix handling of subpage regions when calculating nosave ranges in e820__register_nosave_regions()
	libbpf: Prevent compiler warnings/errors
	kbuild: Add '-fno-builtin-wcslen'
	media: mediatek: vcodec: mark vdec_vp9_slice_map_counts_eob_coef noinline
	Bluetooth: hci_uart: Fix another race during initialization
	s390/cpumf: Fix double free on error in cpumf_pmu_event_init()
	HSI: ssi_protocol: Fix use after free vulnerability in ssi_protocol Driver Due to Race Condition
	Linux 6.12.24

Change-Id: I272e8aac67399f2eb57ca25e05cded24172d2d76
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-04-30 09:07:04 +00:00
105 changed files with 1188 additions and 507 deletions

View File

@@ -55,8 +55,7 @@ properties:
- const: arm,primecell
reg:
minItems: 1
maxItems: 2
maxItems: 1
clocks:
maxItems: 1

View File

@@ -41,8 +41,7 @@ properties:
- const: arm,primecell
reg:
minItems: 1
maxItems: 2
maxItems: 1
qcom,dsb-element-bits:
description:

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 23
SUBLEVEL = 24
EXTRAVERSION =
NAME = Baby Opossum Posse
@@ -1045,6 +1045,9 @@ ifdef CONFIG_CC_IS_GCC
KBUILD_CFLAGS += -fconserve-stack
endif
# Ensure compilers do not transform certain loops into calls to wcslen()
KBUILD_CFLAGS += -fno-builtin-wcslen
# change __FILE__ to the relative path from the srctree
KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)

View File

@@ -876,6 +876,14 @@ static u8 spectre_bhb_loop_affected(void)
{
u8 k = 0;
static const struct midr_range spectre_bhb_k132_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
};
static const struct midr_range spectre_bhb_k38_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
};
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
@@ -889,6 +897,7 @@ static u8 spectre_bhb_loop_affected(void)
};
static const struct midr_range spectre_bhb_k24_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
@@ -904,7 +913,11 @@ static u8 spectre_bhb_loop_affected(void)
{},
};
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k132_list))
k = 132;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k38_list))
k = 38;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24;

View File

@@ -550,12 +550,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
fallthrough;
case KVM_CAP_SPAPR_TCE_64:
r = 1;
break;
case KVM_CAP_SPAPR_TCE_VFIO:
r = !!cpu_has_feature(CPU_FTR_HVMODE);
break;
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
case KVM_CAP_PPC_ENABLE_HCALL:

View File

@@ -15,7 +15,7 @@ KBUILD_CFLAGS_MODULE += -fPIC
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
KBUILD_CFLAGS += -fPIC
LDFLAGS_vmlinux := -no-pie --emit-relocs --discard-none
LDFLAGS_vmlinux := $(call ld-option,-no-pie) --emit-relocs --discard-none
extra_tools := relocs
aflags_dwarf := -Wa,-gdwarf-2
KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__

View File

@@ -858,18 +858,13 @@ static int cpumf_pmu_event_type(struct perf_event *event)
static int cpumf_pmu_event_init(struct perf_event *event)
{
unsigned int type = event->attr.type;
int err;
int err = -ENOENT;
if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
err = __hw_perf_event_init(event, type);
else if (event->pmu->type == type)
/* Registered as unknown PMU */
err = __hw_perf_event_init(event, cpumf_pmu_event_type(event));
else
return -ENOENT;
if (unlikely(err) && event->destroy)
event->destroy(event);
return err;
}
@@ -1819,8 +1814,6 @@ static int cfdiag_event_init(struct perf_event *event)
event->destroy = hw_perf_event_destroy;
err = cfdiag_event_init2(event);
if (unlikely(err))
event->destroy(event);
out:
return err;
}

View File

@@ -898,9 +898,6 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
event->attr.exclude_idle = 0;
err = __hw_perf_event_init(event);
if (unlikely(err))
if (event->destroy)
event->destroy(event);
return err;
}

View File

@@ -335,6 +335,9 @@ static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev
{
struct pci_dev *pdev;
if (!zdev->vfn)
return false;
pdev = zpci_iov_find_parent_pf(zbus, zdev);
if (!pdev)
return true;

View File

@@ -76,6 +76,28 @@ static __always_inline void native_local_irq_restore(unsigned long flags)
#endif
#ifndef CONFIG_PARAVIRT
#ifndef __ASSEMBLY__
/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
/*
 * Non-paravirt build: enable interrupts and halt in one step by calling
 * straight into the native implementation (no pv_ops indirection).
 */
static __always_inline void arch_safe_halt(void)
{
native_safe_halt();
}
/*
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
/*
 * Non-paravirt build: halt the CPU without touching the interrupt flag,
 * delegating directly to the native implementation.
 */
static __always_inline void halt(void)
{
native_halt();
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
@@ -97,24 +119,6 @@ static __always_inline void arch_local_irq_enable(void)
native_irq_enable();
}
/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
static __always_inline void arch_safe_halt(void)
{
native_safe_halt();
}
/*
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
static __always_inline void halt(void)
{
native_halt();
}
/*
* For spinlocks, etc:
*/

View File

@@ -107,6 +107,16 @@ static inline void notify_page_enc_status_changed(unsigned long pfn,
PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}
/* Paravirt build: dispatch through the pv_ops.irq.safe_halt callback. */
static __always_inline void arch_safe_halt(void)
{
PVOP_VCALL0(irq.safe_halt);
}
/* Paravirt build: dispatch through the pv_ops.irq.halt callback. */
static inline void halt(void)
{
PVOP_VCALL0(irq.halt);
}
#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
@@ -170,16 +180,6 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x);
}
static __always_inline void arch_safe_halt(void)
{
PVOP_VCALL0(irq.safe_halt);
}
static inline void halt(void)
{
PVOP_VCALL0(irq.halt);
}
extern noinstr void pv_native_wbinvd(void);
static __always_inline void wbinvd(void)

View File

@@ -122,10 +122,9 @@ struct pv_irq_ops {
struct paravirt_callee_save save_fl;
struct paravirt_callee_save irq_disable;
struct paravirt_callee_save irq_enable;
#endif
void (*safe_halt)(void);
void (*halt)(void);
#endif
} __no_randomize_layout;
struct pv_mmu_ops {

View File

@@ -754,22 +754,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
int i;
unsigned long pfn = 0;
u64 last_addr = 0;
for (i = 0; i < e820_table->nr_entries; i++) {
struct e820_entry *entry = &e820_table->entries[i];
if (pfn < PFN_UP(entry->addr))
register_nosave_region(pfn, PFN_UP(entry->addr));
pfn = PFN_DOWN(entry->addr + entry->size);
if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
register_nosave_region(PFN_UP(entry->addr), pfn);
continue;
if (pfn >= limit_pfn)
break;
if (last_addr < entry->addr)
register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
last_addr = entry->addr + entry->size;
}
register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}
#ifdef CONFIG_ACPI

View File

@@ -100,6 +100,11 @@ int paravirt_disable_iospace(void)
return request_resource(&ioport_resource, &reserve_ioports);
}
/*
 * Bare-metal backend for pv_ops.irq.safe_halt (installed in the pv_ops
 * table below); marked noinstr so no instrumentation is emitted.
 */
static noinstr void pv_native_safe_halt(void)
{
native_safe_halt();
}
#ifdef CONFIG_PARAVIRT_XXL
static noinstr void pv_native_write_cr2(unsigned long val)
{
@@ -121,10 +126,6 @@ noinstr void pv_native_wbinvd(void)
native_wbinvd();
}
static noinstr void pv_native_safe_halt(void)
{
native_safe_halt();
}
#endif
struct pv_info pv_info = {
@@ -182,9 +183,11 @@ struct paravirt_patch_template pv_ops = {
.irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
#endif /* CONFIG_PARAVIRT_XXL */
/* Irq HLT ops. */
.irq.safe_halt = pv_native_safe_halt,
.irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */
/* Mmu ops. */
.mmu.flush_tlb_user = native_flush_tlb_local,

View File

@@ -1047,8 +1047,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
}
break;
case 0xa: { /* Architectural Performance Monitoring */
union cpuid10_eax eax;
union cpuid10_edx edx;
union cpuid10_eax eax = { };
union cpuid10_edx edx = { };
if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
@@ -1064,8 +1064,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
if (kvm_pmu_cap.version)
edx.split.anythread_deprecated = 1;
edx.split.reserved1 = 0;
edx.split.reserved2 = 0;
entry->eax = eax.full;
entry->ebx = kvm_pmu_cap.events_mask;
@@ -1383,7 +1381,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
/* AMD Extended Performance Monitoring and Debug */
case 0x80000022: {
union cpuid_0x80000022_ebx ebx;
union cpuid_0x80000022_ebx ebx = { };
entry->ecx = entry->edx = 0;
if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {

View File

@@ -11769,6 +11769,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
r = kvm_apic_accept_events(vcpu);
if (r < 0)
goto out;
@@ -11782,6 +11784,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state = vcpu->arch.mp_state;
out:
kvm_vcpu_srcu_read_unlock(vcpu);
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);

View File

@@ -70,6 +70,9 @@ EXPORT_SYMBOL(xen_start_flags);
*/
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
static __ref void xen_get_vendor(void)
{
init_cpu_devs();
@@ -465,6 +468,13 @@ int __init arch_xen_unpopulated_init(struct resource **res)
xen_free_unpopulated_pages(1, &pg);
}
/*
* Account for the region being in the physmap but unpopulated.
* The value in xen_released_pages is used by the balloon
* driver to know how much of the physmap is unpopulated and
* set an accurate initial memory target.
*/
xen_released_pages += xen_extra_mem[i].n_pfns;
/* Zero so region is not also added to the balloon driver. */
xen_extra_mem[i].n_pfns = 0;
}

View File

@@ -37,9 +37,6 @@
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;

View File

@@ -22,8 +22,8 @@ static const char * const profile_names[] = {
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
static ssize_t platform_profile_choices_show(struct device *dev,
struct device_attribute *attr,
static ssize_t platform_profile_choices_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
int len = 0;
@@ -49,8 +49,8 @@ static ssize_t platform_profile_choices_show(struct device *dev,
return len;
}
static ssize_t platform_profile_show(struct device *dev,
struct device_attribute *attr,
static ssize_t platform_profile_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
@@ -77,8 +77,8 @@ static ssize_t platform_profile_show(struct device *dev,
return sysfs_emit(buf, "%s\n", profile_names[profile]);
}
static ssize_t platform_profile_store(struct device *dev,
struct device_attribute *attr,
static ssize_t platform_profile_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int err, i;
@@ -115,12 +115,12 @@ static ssize_t platform_profile_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RO(platform_profile_choices);
static DEVICE_ATTR_RW(platform_profile);
static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices);
static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
static struct attribute *platform_profile_attrs[] = {
&dev_attr_platform_profile_choices.attr,
&dev_attr_platform_profile.attr,
&attr_platform_profile_choices.attr,
&attr_platform_profile.attr,
NULL
};

View File

@@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
if (!skb) {
percpu_down_read(&hu->proto_lock);
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
test_bit(HCI_UART_PROTO_INIT, &hu->flags))
skb = hu->proto->dequeue(hu);
percpu_up_read(&hu->proto_lock);
@@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!percpu_down_read_trylock(&hu->proto_lock))
return 0;
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
!test_bit(HCI_UART_PROTO_INIT, &hu->flags))
goto no_schedule;
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
@@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
percpu_down_read(&hu->proto_lock);
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
!test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return -EUNATCH;
}
@@ -585,7 +588,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
if (tty != hu->tty)
return;
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
test_bit(HCI_UART_PROTO_INIT, &hu->flags))
hci_uart_tx_wakeup(hu);
}
@@ -611,7 +615,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
percpu_down_read(&hu->proto_lock);
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
!test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return;
}
@@ -707,13 +712,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
hu->proto = p;
set_bit(HCI_UART_PROTO_READY, &hu->flags);
set_bit(HCI_UART_PROTO_INIT, &hu->flags);
err = hci_uart_register_dev(hu);
if (err) {
return err;
}
set_bit(HCI_UART_PROTO_READY, &hu->flags);
clear_bit(HCI_UART_PROTO_INIT, &hu->flags);
return 0;
}

View File

@@ -90,6 +90,7 @@ struct hci_uart {
#define HCI_UART_REGISTERED 1
#define HCI_UART_PROTO_READY 2
#define HCI_UART_NO_SUSPEND_NOTIFIER 3
#define HCI_UART_PROTO_INIT 4
/* TX states */
#define HCI_UART_SENDING 1

View File

@@ -28,7 +28,7 @@ static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
static bool clk_branch_check_halt(const struct clk_branch *br, bool enabling)
{
bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
u32 val;
regmap_read(br->clkr.regmap, br->halt_reg, &val);
@@ -44,7 +44,7 @@ static bool clk_branch2_check_halt(const struct clk_branch *br, bool enabling)
{
u32 val;
u32 mask;
bool invert = (br->halt_check == BRANCH_HALT_ENABLE);
bool invert = (br->halt_check & BRANCH_HALT_ENABLE);
mask = CBCR_NOC_FSM_STATUS;
mask |= CBCR_CLK_OFF;

View File

@@ -292,6 +292,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
*/
udelay(1);
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
@@ -308,9 +311,6 @@ static int gdsc_enable(struct generic_pm_domain *domain)
udelay(1);
}
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
return 0;
}
@@ -457,13 +457,6 @@ static int gdsc_init(struct gdsc *sc)
goto err_disable_supply;
}
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
goto err_disable_supply;
}
/*
* Make sure the retain bit is set if the GDSC is already on,
* otherwise we end up turning off the GDSC and destroying all
@@ -471,6 +464,14 @@ static int gdsc_init(struct gdsc *sc)
*/
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
goto err_disable_supply;
}
} else if (sc->flags & ALWAYS_ON) {
/* If ALWAYS_ON GDSCs are not ON, turn them ON */
gdsc_enable(&sc->pd);
@@ -506,6 +507,23 @@ err_disable_supply:
return ret;
}
/*
 * Unlink the first @num GDSC power domains in @desc from their parent
 * genpd (either the explicit scs[i]->parent or the device's own PM
 * domain). Walks in reverse registration order; shared by the
 * gdsc_register() error-unwind path and gdsc_unregister().
 */
static void gdsc_pm_subdomain_remove(struct gdsc_desc *desc, size_t num)
{
struct device *dev = desc->dev;
struct gdsc **scs = desc->scs;
int i;
/* Remove subdomains in reverse order; skip slots that were never populated. */
for (i = num - 1; i >= 0; i--) {
if (!scs[i])
continue;
if (scs[i]->parent)
pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
else if (!IS_ERR_OR_NULL(dev->pm_domain))
pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
}
}
int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev, struct regmap *regmap)
{
@@ -555,30 +573,27 @@ int gdsc_register(struct gdsc_desc *desc,
if (!scs[i])
continue;
if (scs[i]->parent)
pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
ret = pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
else if (!IS_ERR_OR_NULL(dev->pm_domain))
pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
ret = pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
if (ret)
goto err_pm_subdomain_remove;
}
return of_genpd_add_provider_onecell(dev->of_node, data);
err_pm_subdomain_remove:
gdsc_pm_subdomain_remove(desc, i);
return ret;
}
void gdsc_unregister(struct gdsc_desc *desc)
{
int i;
struct device *dev = desc->dev;
struct gdsc **scs = desc->scs;
size_t num = desc->num;
/* Remove subdomains */
for (i = 0; i < num; i++) {
if (!scs[i])
continue;
if (scs[i]->parent)
pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
else if (!IS_ERR_OR_NULL(dev->pm_domain))
pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
}
gdsc_pm_subdomain_remove(desc, num);
of_genpd_del_provider(dev->of_node);
}

View File

@@ -89,7 +89,9 @@ static const struct clk_div_table dtable_1_32[] = {
/* Mux clock tables */
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
#ifdef CONFIG_ARM64
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
#endif
static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };
static const u32 mtable_sdhi[] = { 1, 2, 3 };
@@ -137,7 +139,12 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32),
DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
#ifdef CONFIG_ARM64
DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2),
#endif
#ifdef CONFIG_RISCV
DEF_FIXED("HP", R9A07G043_CLK_HP, CLK_PLL6_250, 1, 1),
#endif
DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,

View File

@@ -189,14 +189,17 @@ static bool sp_pci_is_master(struct sp_device *sp)
pdev_new = to_pci_dev(dev_new);
pdev_cur = to_pci_dev(dev_cur);
if (pdev_new->bus->number < pdev_cur->bus->number)
return true;
if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus))
return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus);
if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
return true;
if (pdev_new->bus->number != pdev_cur->bus->number)
return pdev_new->bus->number < pdev_cur->bus->number;
if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
return true;
if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn))
return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn);
if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn))
return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn);
return false;
}

View File

@@ -823,6 +823,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *irq;
struct tegra_gpio *gpio;
struct device_node *np;
struct resource *res;
char **names;
int err;
@@ -842,19 +843,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->num_banks++;
/* get register apertures */
gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security");
if (IS_ERR(gpio->secure)) {
gpio->secure = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->secure))
return PTR_ERR(gpio->secure);
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security");
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
gpio->secure = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(gpio->secure))
return PTR_ERR(gpio->secure);
gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio");
if (IS_ERR(gpio->base)) {
gpio->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
gpio->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
err = platform_irq_count(pdev);
if (err < 0)

View File

@@ -1011,6 +1011,7 @@ static void zynq_gpio_remove(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n");
device_init_wakeup(&pdev->dev, 0);
gpiochip_remove(&gpio->chip);
device_set_wakeup_capable(&pdev->dev, 0);
pm_runtime_disable(&pdev->dev);

View File

@@ -401,6 +401,7 @@ static void ssip_reset(struct hsi_client *cl)
del_timer(&ssi->rx_wd);
del_timer(&ssi->tx_wd);
del_timer(&ssi->keep_alive);
cancel_work_sync(&ssi->work);
ssi->main_state = 0;
ssi->send_state = 0;
ssi->recv_state = 0;

View File

@@ -352,6 +352,122 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
return 0;
}
/* The device attach/detach/replace helpers for attach_handle */
/*
 * Check if idev is attached to igroup->hwpt, i.e. whether it appears on
 * its group's device_list.
 * NOTE(review): walks igroup->device_list unlocked here; presumably the
 * caller holds igroup->lock (sibling helpers lockdep-assert it) — confirm.
 */
static bool iommufd_device_is_attached(struct iommufd_device *idev)
{
struct iommufd_device *cur;
list_for_each_entry(cur, &idev->igroup->device_list, group_item)
if (cur == idev)
return true;
return false;
}
/*
 * Attach @idev's group to @hwpt->domain via a per-attachment handle.
 *
 * Allocates an iommufd_attach_handle, enables IOPF on the device first
 * when the page table has a fault object, then attaches the group with
 * the handle. Unwinds IOPF enablement and frees the handle on failure.
 * Returns 0 on success or a negative errno (-ENOMEM, or the error from
 * iommufd_fault_iopf_enable()/iommu_attach_group_handle()).
 */
static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
struct iommufd_attach_handle *handle;
int rc;
/* Caller must hold the group lock for the attach sequence. */
lockdep_assert_held(&idev->igroup->lock);
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
/* Fault-capable hwpt: turn on I/O page faults before attaching. */
if (hwpt->fault) {
rc = iommufd_fault_iopf_enable(idev);
if (rc)
goto out_free_handle;
}
handle->idev = idev;
rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
&handle->handle);
if (rc)
goto out_disable_iopf;
return 0;
out_disable_iopf:
if (hwpt->fault)
iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
}
/*
 * Look up the iommufd attach handle currently installed for @idev's
 * group at the no-PASID slot. Returns NULL if none is installed.
 */
static struct iommufd_attach_handle *
iommufd_device_get_attach_handle(struct iommufd_device *idev)
{
struct iommu_attach_handle *handle;
/* The handle is only stable while the group lock is held. */
lockdep_assert_held(&idev->igroup->lock);
handle =
iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
if (IS_ERR(handle))
return NULL;
/* Convert the core iommu handle to iommufd's containing structure. */
return to_iommufd_handle(handle);
}
/*
 * Detach @idev's group from @hwpt->domain and release the attach handle
 * that iommufd_hwpt_attach_device() installed. For fault-capable page
 * tables, auto-respond to any faults still queued against the handle
 * and disable IOPF on the device after the detach.
 */
static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
struct iommufd_attach_handle *handle;
handle = iommufd_device_get_attach_handle(idev);
iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
if (hwpt->fault) {
iommufd_auto_response_faults(hwpt, handle);
iommufd_fault_iopf_disable(idev);
}
kfree(handle);
}
/*
 * Atomically replace @idev's attachment from @old to @hwpt with a fresh
 * attach handle.
 *
 * IOPF is enabled up-front only when the new hwpt needs it and the old
 * one did not; after a successful replace, faults pending on the old
 * handle are auto-responded and IOPF is disabled if the new hwpt does
 * not use it. The old handle is freed on success; on failure the new
 * handle is freed and any IOPF enablement done here is undone, leaving
 * the original attachment state in place.
 * Returns 0 on success or a negative errno.
 */
static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
struct iommufd_hw_pagetable *hwpt,
struct iommufd_hw_pagetable *old)
{
struct iommufd_attach_handle *handle, *old_handle =
iommufd_device_get_attach_handle(idev);
int rc;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
/* Only enable IOPF if the new domain needs it and it is not already on. */
if (hwpt->fault && !old->fault) {
rc = iommufd_fault_iopf_enable(idev);
if (rc)
goto out_free_handle;
}
handle->idev = idev;
rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain,
&handle->handle);
if (rc)
goto out_disable_iopf;
/* Drain the old handle's pending faults; drop IOPF if no longer needed. */
if (old->fault) {
iommufd_auto_response_faults(hwpt, old_handle);
if (!hwpt->fault)
iommufd_fault_iopf_disable(idev);
}
kfree(old_handle);
return 0;
out_disable_iopf:
if (hwpt->fault && !old->fault)
iommufd_fault_iopf_disable(idev);
out_free_handle:
kfree(handle);
return rc;
}
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
@@ -488,6 +604,11 @@ iommufd_device_do_replace(struct iommufd_device *idev,
goto err_unlock;
}
if (!iommufd_device_is_attached(idev)) {
rc = -EINVAL;
goto err_unlock;
}
if (hwpt == igroup->hwpt) {
mutex_unlock(&idev->igroup->lock);
return NULL;

View File

@@ -16,7 +16,7 @@
#include "../iommu-priv.h"
#include "iommufd_private.h"
static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
int iommufd_fault_iopf_enable(struct iommufd_device *idev)
{
struct device *dev = idev->dev;
int ret;
@@ -45,7 +45,7 @@ static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
return ret;
}
static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
void iommufd_fault_iopf_disable(struct iommufd_device *idev)
{
mutex_lock(&idev->iopf_lock);
if (!WARN_ON(idev->iopf_enabled == 0)) {
@@ -93,8 +93,8 @@ int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
return ret;
}
static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle)
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle)
{
struct iommufd_fault *fault = hwpt->fault;
struct iopf_group *group, *next;

View File

@@ -523,35 +523,10 @@ int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
struct iommufd_hw_pagetable *hwpt,
struct iommufd_hw_pagetable *old);
static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
if (hwpt->fault)
return iommufd_fault_domain_attach_dev(hwpt, idev);
return iommu_attach_group(hwpt->domain, idev->igroup->group);
}
static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
if (hwpt->fault) {
iommufd_fault_domain_detach_dev(hwpt, idev);
return;
}
iommu_detach_group(hwpt->domain, idev->igroup->group);
}
static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
struct iommufd_hw_pagetable *hwpt,
struct iommufd_hw_pagetable *old)
{
if (old->fault || hwpt->fault)
return iommufd_fault_domain_replace_dev(idev, hwpt, old);
return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
}
int iommufd_fault_iopf_enable(struct iommufd_device *idev);
void iommufd_fault_iopf_disable(struct iommufd_device *idev);
void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
struct iommufd_attach_handle *handle);
#ifdef CONFIG_IOMMUFD_TEST
int iommufd_test(struct iommufd_ucmd *ucmd);

View File

@@ -390,6 +390,12 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
/*
 * dm-ebs postsuspend hook: drop the dm-bufio client's cached buffers so
 * no stale data survives across a suspend/resume cycle.
 */
static void ebs_postsuspend(struct dm_target *ti)
{
struct ebs_c *ec = ti->private;
dm_bufio_client_reset(ec->bufio);
}
static void ebs_status(struct dm_target *ti, status_type_t type,
unsigned int status_flags, char *result, unsigned int maxlen)
{
@@ -447,6 +453,7 @@ static struct target_type ebs_target = {
.ctr = ebs_ctr,
.dtr = ebs_dtr,
.map = ebs_map,
.postsuspend = ebs_postsuspend,
.status = ebs_status,
.io_hints = ebs_io_hints,
.prepare_ioctl = ebs_prepare_ioctl,

View File

@@ -21,6 +21,7 @@
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <crypto/utils.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>
@@ -516,7 +517,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
dm_integrity_io_error(ic, "crypto_shash_digest", r);
return r;
}
if (memcmp(mac, actual_mac, mac_size)) {
if (crypto_memneq(mac, actual_mac, mac_size)) {
dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
return -EILSEQ;
@@ -859,7 +860,7 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool
if (likely(wr))
memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
else {
if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
if (crypto_memneq(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
dm_integrity_io_error(ic, "journal mac", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
}
@@ -1401,10 +1402,9 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
unsigned int *metadata_offset, unsigned int total_size, int op)
{
#define MAY_BE_FILLER 1
#define MAY_BE_HASH 2
unsigned int hash_offset = 0;
unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
unsigned char mismatch_hash = 0;
unsigned char mismatch_filler = !ic->discard;
do {
unsigned char *data, *dp;
@@ -1425,7 +1425,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
if (op == TAG_READ) {
memcpy(tag, dp, to_copy);
} else if (op == TAG_WRITE) {
if (memcmp(dp, tag, to_copy)) {
if (crypto_memneq(dp, tag, to_copy)) {
memcpy(dp, tag, to_copy);
dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
}
@@ -1433,29 +1433,30 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
/* e.g.: op == TAG_CMP */
if (likely(is_power_of_2(ic->tag_size))) {
if (unlikely(memcmp(dp, tag, to_copy)))
if (unlikely(!ic->discard) ||
unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
goto thorough_test;
}
if (unlikely(crypto_memneq(dp, tag, to_copy)))
goto thorough_test;
} else {
unsigned int i, ts;
thorough_test:
ts = total_size;
for (i = 0; i < to_copy; i++, ts--) {
if (unlikely(dp[i] != tag[i]))
may_be &= ~MAY_BE_HASH;
if (likely(dp[i] != DISCARD_FILLER))
may_be &= ~MAY_BE_FILLER;
/*
* Warning: the control flow must not be
* dependent on match/mismatch of
* individual bytes.
*/
mismatch_hash |= dp[i] ^ tag[i];
mismatch_filler |= dp[i] ^ DISCARD_FILLER;
hash_offset++;
if (unlikely(hash_offset == ic->tag_size)) {
if (unlikely(!may_be)) {
if (unlikely(mismatch_hash) && unlikely(mismatch_filler)) {
dm_bufio_release(b);
return ts;
}
hash_offset = 0;
may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
mismatch_hash = 0;
mismatch_filler = !ic->discard;
}
}
}
@@ -1476,8 +1477,6 @@ thorough_test:
} while (unlikely(total_size));
return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}
struct flush_request {
@@ -2076,7 +2075,7 @@ retry_kmap:
char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
if (unlikely(crypto_memneq(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
logical_sector);
dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
@@ -2595,7 +2594,7 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
bio_put(outgoing_bio);
integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest);
if (unlikely(memcmp(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
atomic64_inc(&ic->number_of_mismatches);
@@ -2634,7 +2633,7 @@ static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status
char *mem = bvec_kmap_local(&bv);
//memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT);
integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest);
if (unlikely(memcmp(digest, dio->integrity_payload + pos,
if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
kunmap_local(mem);
dm_integrity_free_payload(dio);
@@ -2911,7 +2910,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag);
if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
}
@@ -5081,16 +5080,19 @@ try_smaller_buffer:
ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
if (!ic->recalc_bitmap) {
ti->error = "Could not allocate memory for bitmap";
r = -ENOMEM;
goto bad;
}
ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
if (!ic->may_write_bitmap) {
ti->error = "Could not allocate memory for bitmap";
r = -ENOMEM;
goto bad;
}
ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
if (!ic->bbs) {
ti->error = "Could not allocate memory for bitmap";
r = -ENOMEM;
goto bad;
}

View File

@@ -906,6 +906,13 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
/*
 * dm-verity ->postsuspend hook.
 *
 * Waits for all in-flight verification work on the verify workqueue to
 * finish, then resets the dm-bufio client. NOTE(review): the bufio reset
 * presumably drops cached hash-block buffers so they are re-read from the
 * (possibly updated) device after resume — confirm against
 * dm_bufio_client_reset().
 */
static void verity_postsuspend(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;

	flush_workqueue(v->verify_wq);
	dm_bufio_client_reset(v->bufio);
}
/*
* Status: V (valid) or C (corruption found)
*/
@@ -1881,6 +1888,7 @@ static struct target_type verity_target = {
.ctr = verity_ctr,
.dtr = verity_dtr,
.map = verity_map,
.postsuspend = verity_postsuspend,
.status = verity_status,
.prepare_ioctl = verity_prepare_ioctl,
.iterate_devices = verity_iterate_devices,

View File

@@ -1188,7 +1188,8 @@ err:
return ret;
}
static
/* clang stack usage explodes if this is inlined */
static noinline_for_stack
void vdec_vp9_slice_map_counts_eob_coef(unsigned int i, unsigned int j, unsigned int k,
struct vdec_vp9_slice_frame_counts *counts,
struct v4l2_vp9_frame_symbol_counts *counts_helper)

View File

@@ -234,7 +234,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
return true;
fail:
switch (irq_type) {
switch (test->irq_type) {
case IRQ_TYPE_INTX:
dev_err(dev, "Failed to request IRQ %d for Legacy\n",
pci_irq_vector(pdev, i));

View File

@@ -392,7 +392,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
*/
data[i++] = 0;
data[i++] = 0;
data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
data[i++] =
(tx->dqo_tx.tail - tx->dqo_tx.head) &
tx->mask;
}
do {
start =

View File

@@ -636,30 +636,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
mpc->rxbpre_total = 0;
for (i = 0; i < num_rxb; i++) {
if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
if (!va)
goto error;
page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
if (!page)
goto error;
page = virt_to_head_page(va);
/* Check if the frag falls back to single page */
if (compound_order(page) <
get_order(mpc->rxbpre_alloc_size)) {
put_page(page);
goto error;
}
} else {
page = dev_alloc_page();
if (!page)
goto error;
va = page_to_virt(page);
}
va = page_to_virt(page);
da = dma_map_single(dev, va + mpc->rxbpre_headroom,
mpc->rxbpre_datasize, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
put_page(virt_to_head_page(va));
put_page(page);
goto error;
}
@@ -1618,7 +1604,7 @@ drop:
}
static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
dma_addr_t *da, bool *from_pool, bool is_napi)
dma_addr_t *da, bool *from_pool)
{
struct page *page;
void *va;
@@ -1629,21 +1615,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
if (rxq->xdp_save_va) {
va = rxq->xdp_save_va;
rxq->xdp_save_va = NULL;
} else if (rxq->alloc_size > PAGE_SIZE) {
if (is_napi)
va = napi_alloc_frag(rxq->alloc_size);
else
va = netdev_alloc_frag(rxq->alloc_size);
if (!va)
return NULL;
page = virt_to_head_page(va);
/* Check if the frag falls back to single page */
if (compound_order(page) < get_order(rxq->alloc_size)) {
put_page(page);
return NULL;
}
} else {
page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
@@ -1676,7 +1647,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
dma_addr_t da;
void *va;
va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return;
@@ -2083,7 +2054,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
if (mpc->rxbufs_pre)
va = mana_get_rxbuf_pre(rxq, &da);
else
va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return -ENOMEM;
@@ -2169,6 +2140,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
pprm.order = get_order(rxq->alloc_size);
rxq->page_pool = page_pool_create(&pprm);

View File

@@ -1353,7 +1353,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
qp_count = ilog2(qp_bitmap);
if (nt->use_msi) {
qp_count -= 1;
nt->msi_db_mask = 1 << qp_count;
nt->msi_db_mask = BIT_ULL(qp_count);
ntb_db_clear_mask(ndev, nt->msi_db_mask);
}

View File

@@ -16,6 +16,7 @@
#define pr_fmt(fmt) "OF: " fmt
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
@@ -38,11 +39,15 @@
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
struct of_phandle_args oirq;
unsigned int ret;
if (of_irq_parse_one(dev, index, &oirq))
return 0;
return irq_create_of_mapping(&oirq);
ret = irq_create_of_mapping(&oirq);
of_node_put(oirq.np);
return ret;
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
@@ -165,6 +170,8 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph
* the specifier for each map, and then returns the translated map.
*
* Return: 0 on success and a negative number on error
*
* Note: refcount of node @out_irq->np is increased by 1 on success.
*/
int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
{
@@ -310,6 +317,12 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
addrsize = (imap - match_array) - intsize;
if (ipar == newpar) {
/*
* We got @ipar's refcount, but the refcount was
* gotten again by of_irq_parse_imap_parent() via its
* alias @newpar.
*/
of_node_put(ipar);
pr_debug("%pOF interrupt-map entry to self\n", ipar);
return 0;
}
@@ -339,10 +352,12 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
* This function resolves an interrupt for a node by walking the interrupt tree,
* finding which interrupt controller node it is attached to, and returning the
* interrupt specifier that can be used to retrieve a Linux IRQ number.
*
* Note: refcount of node @out_irq->np is increased by 1 on success.
*/
int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq)
{
struct device_node *p;
struct device_node __free(device_node) *p = NULL;
const __be32 *addr;
u32 intsize;
int i, res, addr_len;
@@ -367,41 +382,33 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar
/* Try the new-style interrupts-extended first */
res = of_parse_phandle_with_args(device, "interrupts-extended",
"#interrupt-cells", index, out_irq);
if (!res)
return of_irq_parse_raw(addr_buf, out_irq);
if (!res) {
p = out_irq->np;
} else {
/* Look for the interrupt parent. */
p = of_irq_find_parent(device);
/* Get size of interrupt specifier */
if (!p || of_property_read_u32(p, "#interrupt-cells", &intsize))
return -EINVAL;
/* Look for the interrupt parent. */
p = of_irq_find_parent(device);
if (p == NULL)
return -EINVAL;
pr_debug(" parent=%pOF, intsize=%d\n", p, intsize);
/* Get size of interrupt specifier */
if (of_property_read_u32(p, "#interrupt-cells", &intsize)) {
res = -EINVAL;
goto out;
/* Copy intspec into irq structure */
out_irq->np = p;
out_irq->args_count = intsize;
for (i = 0; i < intsize; i++) {
res = of_property_read_u32_index(device, "interrupts",
(index * intsize) + i,
out_irq->args + i);
if (res)
return res;
}
pr_debug(" intspec=%d\n", *out_irq->args);
}
pr_debug(" parent=%pOF, intsize=%d\n", p, intsize);
/* Copy intspec into irq structure */
out_irq->np = p;
out_irq->args_count = intsize;
for (i = 0; i < intsize; i++) {
res = of_property_read_u32_index(device, "interrupts",
(index * intsize) + i,
out_irq->args + i);
if (res)
goto out;
}
pr_debug(" intspec=%d\n", *out_irq->args);
/* Check if there are any interrupt-map translations to process */
res = of_irq_parse_raw(addr_buf, out_irq);
out:
of_node_put(p);
return res;
return of_irq_parse_raw(addr_buf, out_irq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_one);
@@ -505,8 +512,10 @@ int of_irq_count(struct device_node *dev)
struct of_phandle_args irq;
int nr = 0;
while (of_irq_parse_one(dev, nr, &irq) == 0)
while (of_irq_parse_one(dev, nr, &irq) == 0) {
of_node_put(irq.np);
nr++;
}
return nr;
}
@@ -623,6 +632,8 @@ void __init of_irq_init(const struct of_device_id *matches)
__func__, desc->dev, desc->dev,
desc->interrupt_parent);
of_node_clear_flag(desc->dev, OF_POPULATED);
of_node_put(desc->interrupt_parent);
of_node_put(desc->dev);
kfree(desc);
continue;
}
@@ -653,6 +664,7 @@ void __init of_irq_init(const struct of_device_id *matches)
err:
list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) {
list_del(&desc->list);
of_node_put(desc->interrupt_parent);
of_node_put(desc->dev);
kfree(desc);
}

View File

@@ -355,6 +355,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.quirk_disable_flr = true,
.max_lanes = 2,
};
@@ -376,13 +377,13 @@ static const struct j721e_pcie_data j784s4_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
.byte_access_allowed = false,
.linkdown_irq_regfield = LINK_DOWN,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};
static const struct j721e_pcie_data j784s4_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = LINK_DOWN,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};

View File

@@ -1786,7 +1786,7 @@ static struct pci_ops brcm7425_pcie_ops = {
static int brcm_pcie_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *msi_np;
struct device_node *np = pdev->dev.of_node;
struct pci_host_bridge *bridge;
const struct pcie_cfg_data *data;
struct brcm_pcie *pcie;
@@ -1890,9 +1890,14 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
}
msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
if (pci_msi_enabled() && msi_np == pcie->np) {
ret = brcm_pcie_enable_msi(pcie);
if (pci_msi_enabled()) {
struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
if (msi_np == pcie->np)
ret = brcm_pcie_enable_msi(pcie);
of_node_put(msi_np);
if (ret) {
dev_err(pcie->dev, "probe of internal MSI failed");
goto fail;

View File

@@ -40,7 +40,7 @@
* Legacy struct storing addresses to whole mapped BARs.
*/
struct pcim_iomap_devres {
void __iomem *table[PCI_STD_NUM_BARS];
void __iomem *table[PCI_NUM_RESOURCES];
};
/* Used to restore the old INTx state on driver detach. */

View File

@@ -286,9 +286,12 @@ static int pciehp_suspend(struct pcie_device *dev)
static bool pciehp_device_replaced(struct controller *ctrl)
{
struct pci_dev *pdev __free(pci_dev_put);
struct pci_dev *pdev __free(pci_dev_put) = NULL;
u32 reg;
if (pci_dev_is_disconnected(ctrl->pcie->port))
return false;
pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
if (!pdev)
return true;

View File

@@ -910,6 +910,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
resource_size_t offset, next_offset;
LIST_HEAD(resources);
struct resource *res, *next_res;
bool bus_registered = false;
char addr[64], *fmt;
const char *name;
int err;
@@ -973,6 +974,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
name = dev_name(&bus->dev);
err = device_register(&bus->dev);
bus_registered = true;
if (err)
goto unregister;
@@ -1059,12 +1061,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
unregister:
put_device(&bridge->dev);
device_del(&bridge->dev);
free:
#ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_bus_release_domain_nr(parent, bus->domain_nr);
#endif
kfree(bus);
if (bus_registered)
put_device(&bus->dev);
else
kfree(bus);
return err;
}
@@ -1173,7 +1178,10 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
add_dev:
pci_set_bus_msi_domain(child);
ret = device_register(&child->dev);
WARN_ON(ret < 0);
if (WARN_ON(ret < 0)) {
put_device(&child->dev);
return NULL;
}
pcibios_add_bus(child);

View File

@@ -162,6 +162,16 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
return ret;
}
/*
 * PHY ->power_off callback for the i.MX8 PCIe PHY.
 *
 * Asserts both the PHY reset line and the PERST# reset line, leaving the
 * PHY held in reset until the next power_on. Always returns 0; the
 * return values of reset_control_assert() are intentionally not checked.
 */
static int imx8_pcie_phy_power_off(struct phy *phy)
{
	struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);

	reset_control_assert(imx8_phy->reset);
	reset_control_assert(imx8_phy->perst);

	return 0;
}
static int imx8_pcie_phy_init(struct phy *phy)
{
struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
@@ -182,6 +192,7 @@ static const struct phy_ops imx8_pcie_phy_ops = {
.init = imx8_pcie_phy_init,
.exit = imx8_pcie_phy_exit,
.power_on = imx8_pcie_phy_power_on,
.power_off = imx8_pcie_phy_power_off,
.owner = THIS_MODULE,
};

View File

@@ -1044,8 +1044,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
const struct msm_pingroup *g;
u32 intr_target_mask = GENMASK(2, 0);
unsigned long flags;
bool was_enabled;
u32 val;
u32 val, oldval;
if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
set_bit(d->hwirq, pctrl->dual_edge_irqs);
@@ -1107,8 +1106,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
* internal circuitry of TLMM, toggling the RAW_STATUS
* could cause the INTR_STATUS to be set for EDGE interrupts.
*/
val = msm_readl_intr_cfg(pctrl, g);
was_enabled = val & BIT(g->intr_raw_status_bit);
val = oldval = msm_readl_intr_cfg(pctrl, g);
val |= BIT(g->intr_raw_status_bit);
if (g->intr_detection_width == 2) {
val &= ~(3 << g->intr_detection_bit);
@@ -1161,9 +1159,11 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
/*
* The first time we set RAW_STATUS_EN it could trigger an interrupt.
* Clear the interrupt. This is safe because we have
* IRQCHIP_SET_TYPE_MASKED.
* IRQCHIP_SET_TYPE_MASKED. When changing the interrupt type, we could
* also still have a non-matching interrupt latched, so clear whenever
* making changes to the interrupt configuration.
*/
if (!was_enabled)
if (val != oldval)
msm_ack_intr_status(pctrl, g);
if (test_bit(d->hwirq, pctrl->dual_edge_irqs))

View File

@@ -939,83 +939,83 @@ const struct samsung_pinctrl_of_match_data fsd_of_data __initconst = {
/* pin banks of gs101 pin-controller (ALIVE) */
static const struct samsung_pin_bank_data gs101_pin_alive[] = {
EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00),
EXYNOS850_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04),
EXYNOS850_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08),
EXYNOS850_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c),
EXYNOS850_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10),
EXYNOS850_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14),
EXYNOS850_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18),
EXYNOS850_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c),
GS101_PIN_BANK_EINTW(8, 0x0, "gpa0", 0x00, 0x00),
GS101_PIN_BANK_EINTW(7, 0x20, "gpa1", 0x04, 0x08),
GS101_PIN_BANK_EINTW(5, 0x40, "gpa2", 0x08, 0x10),
GS101_PIN_BANK_EINTW(4, 0x60, "gpa3", 0x0c, 0x18),
GS101_PIN_BANK_EINTW(4, 0x80, "gpa4", 0x10, 0x1c),
GS101_PIN_BANK_EINTW(7, 0xa0, "gpa5", 0x14, 0x20),
GS101_PIN_BANK_EINTW(8, 0xc0, "gpa9", 0x18, 0x28),
GS101_PIN_BANK_EINTW(2, 0xe0, "gpa10", 0x1c, 0x30),
};
/* pin banks of gs101 pin-controller (FAR_ALIVE) */
static const struct samsung_pin_bank_data gs101_pin_far_alive[] = {
EXYNOS850_PIN_BANK_EINTW(8, 0x0, "gpa6", 0x00),
EXYNOS850_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04),
EXYNOS850_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08),
EXYNOS850_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c),
GS101_PIN_BANK_EINTW(8, 0x0, "gpa6", 0x00, 0x00),
GS101_PIN_BANK_EINTW(4, 0x20, "gpa7", 0x04, 0x08),
GS101_PIN_BANK_EINTW(8, 0x40, "gpa8", 0x08, 0x0c),
GS101_PIN_BANK_EINTW(2, 0x60, "gpa11", 0x0c, 0x14),
};
/* pin banks of gs101 pin-controller (GSACORE) */
static const struct samsung_pin_bank_data gs101_pin_gsacore[] = {
EXYNOS850_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00),
EXYNOS850_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04),
EXYNOS850_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08),
GS101_PIN_BANK_EINTG(2, 0x0, "gps0", 0x00, 0x00),
GS101_PIN_BANK_EINTG(8, 0x20, "gps1", 0x04, 0x04),
GS101_PIN_BANK_EINTG(3, 0x40, "gps2", 0x08, 0x0c),
};
/* pin banks of gs101 pin-controller (GSACTRL) */
static const struct samsung_pin_bank_data gs101_pin_gsactrl[] = {
EXYNOS850_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00),
GS101_PIN_BANK_EINTW(6, 0x0, "gps3", 0x00, 0x00),
};
/* pin banks of gs101 pin-controller (PERIC0) */
static const struct samsung_pin_bank_data gs101_pin_peric0[] = {
EXYNOS850_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00),
EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04),
EXYNOS850_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08),
EXYNOS850_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c),
EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10),
EXYNOS850_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14),
EXYNOS850_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18),
EXYNOS850_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c),
EXYNOS850_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20),
EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24),
EXYNOS850_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28),
EXYNOS850_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c),
EXYNOS850_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30),
EXYNOS850_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34),
EXYNOS850_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38),
EXYNOS850_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c),
EXYNOS850_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40),
EXYNOS850_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44),
EXYNOS850_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48),
EXYNOS850_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c),
GS101_PIN_BANK_EINTG(5, 0x0, "gpp0", 0x00, 0x00),
GS101_PIN_BANK_EINTG(4, 0x20, "gpp1", 0x04, 0x08),
GS101_PIN_BANK_EINTG(4, 0x40, "gpp2", 0x08, 0x0c),
GS101_PIN_BANK_EINTG(2, 0x60, "gpp3", 0x0c, 0x10),
GS101_PIN_BANK_EINTG(4, 0x80, "gpp4", 0x10, 0x14),
GS101_PIN_BANK_EINTG(2, 0xa0, "gpp5", 0x14, 0x18),
GS101_PIN_BANK_EINTG(4, 0xc0, "gpp6", 0x18, 0x1c),
GS101_PIN_BANK_EINTG(2, 0xe0, "gpp7", 0x1c, 0x20),
GS101_PIN_BANK_EINTG(4, 0x100, "gpp8", 0x20, 0x24),
GS101_PIN_BANK_EINTG(2, 0x120, "gpp9", 0x24, 0x28),
GS101_PIN_BANK_EINTG(4, 0x140, "gpp10", 0x28, 0x2c),
GS101_PIN_BANK_EINTG(2, 0x160, "gpp11", 0x2c, 0x30),
GS101_PIN_BANK_EINTG(4, 0x180, "gpp12", 0x30, 0x34),
GS101_PIN_BANK_EINTG(2, 0x1a0, "gpp13", 0x34, 0x38),
GS101_PIN_BANK_EINTG(4, 0x1c0, "gpp14", 0x38, 0x3c),
GS101_PIN_BANK_EINTG(2, 0x1e0, "gpp15", 0x3c, 0x40),
GS101_PIN_BANK_EINTG(4, 0x200, "gpp16", 0x40, 0x44),
GS101_PIN_BANK_EINTG(2, 0x220, "gpp17", 0x44, 0x48),
GS101_PIN_BANK_EINTG(4, 0x240, "gpp18", 0x48, 0x4c),
GS101_PIN_BANK_EINTG(4, 0x260, "gpp19", 0x4c, 0x50),
};
/* pin banks of gs101 pin-controller (PERIC1) */
static const struct samsung_pin_bank_data gs101_pin_peric1[] = {
EXYNOS850_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00),
EXYNOS850_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04),
EXYNOS850_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08),
EXYNOS850_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c),
EXYNOS850_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10),
EXYNOS850_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14),
EXYNOS850_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18),
EXYNOS850_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c),
GS101_PIN_BANK_EINTG(8, 0x0, "gpp20", 0x00, 0x00),
GS101_PIN_BANK_EINTG(4, 0x20, "gpp21", 0x04, 0x08),
GS101_PIN_BANK_EINTG(2, 0x40, "gpp22", 0x08, 0x0c),
GS101_PIN_BANK_EINTG(8, 0x60, "gpp23", 0x0c, 0x10),
GS101_PIN_BANK_EINTG(4, 0x80, "gpp24", 0x10, 0x18),
GS101_PIN_BANK_EINTG(4, 0xa0, "gpp25", 0x14, 0x1c),
GS101_PIN_BANK_EINTG(5, 0xc0, "gpp26", 0x18, 0x20),
GS101_PIN_BANK_EINTG(4, 0xe0, "gpp27", 0x1c, 0x28),
};
/* pin banks of gs101 pin-controller (HSI1) */
static const struct samsung_pin_bank_data gs101_pin_hsi1[] = {
EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00),
EXYNOS850_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04),
GS101_PIN_BANK_EINTG(6, 0x0, "gph0", 0x00, 0x00),
GS101_PIN_BANK_EINTG(7, 0x20, "gph1", 0x04, 0x08),
};
/* pin banks of gs101 pin-controller (HSI2) */
static const struct samsung_pin_bank_data gs101_pin_hsi2[] = {
EXYNOS850_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00),
EXYNOS850_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04),
EXYNOS850_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08),
GS101_PIN_BANK_EINTG(6, 0x0, "gph2", 0x00, 0x00),
GS101_PIN_BANK_EINTG(2, 0x20, "gph3", 0x04, 0x08),
GS101_PIN_BANK_EINTG(6, 0x40, "gph4", 0x08, 0x0c),
};
static const struct samsung_pin_ctrl gs101_pin_ctrl[] __initconst = {

View File

@@ -165,6 +165,28 @@
.name = id \
}
/*
 * GS101 pin bank with GPIO-type external interrupts (EINT_TYPE_GPIO),
 * reusing the exynos850 "off" bank type. Unlike the EXYNOS850_ variants,
 * it records an explicit filter-config register offset (fltcon_offs) in
 * .eint_fltcon_offset alongside the usual EINT offset.
 */
#define GS101_PIN_BANK_EINTG(pins, reg, id, offs, fltcon_offs) \
	{						\
		.type		= &exynos850_bank_type_off,	\
		.pctl_offset	= reg,			\
		.nr_pins	= pins,			\
		.eint_type	= EINT_TYPE_GPIO,	\
		.eint_offset	= offs,			\
		.eint_fltcon_offset = fltcon_offs,	\
		.name		= id			\
	}

/*
 * GS101 pin bank with wakeup-type external interrupts (EINT_TYPE_WKUP),
 * reusing the exynos850 "alive" bank type; also carries the GS101-specific
 * filter-config register offset.
 */
#define GS101_PIN_BANK_EINTW(pins, reg, id, offs, fltcon_offs) \
	{						\
		.type		= &exynos850_bank_type_alive,	\
		.pctl_offset	= reg,			\
		.nr_pins	= pins,			\
		.eint_type	= EINT_TYPE_WKUP,	\
		.eint_offset	= offs,			\
		.eint_fltcon_offset = fltcon_offs,	\
		.name		= id			\
	}
/**
* struct exynos_weint_data: irq specific data for all the wakeup interrupts
* generated by the external wakeup interrupt controller.

View File

@@ -1230,6 +1230,7 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
bank->eint_con_offset = bdata->eint_con_offset;
bank->eint_mask_offset = bdata->eint_mask_offset;
bank->eint_pend_offset = bdata->eint_pend_offset;
bank->eint_fltcon_offset = bdata->eint_fltcon_offset;
bank->name = bdata->name;
raw_spin_lock_init(&bank->slock);

View File

@@ -144,6 +144,7 @@ struct samsung_pin_bank_type {
* @eint_con_offset: ExynosAuto SoC-specific EINT control register offset of bank.
* @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
* @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
* @eint_fltcon_offset: GS101 SoC-specific EINT filter config register offset.
* @name: name to be prefixed for each pin in this pin bank.
*/
struct samsung_pin_bank_data {
@@ -158,6 +159,7 @@ struct samsung_pin_bank_data {
u32 eint_con_offset;
u32 eint_mask_offset;
u32 eint_pend_offset;
u32 eint_fltcon_offset;
const char *name;
};
@@ -175,6 +177,7 @@ struct samsung_pin_bank_data {
* @eint_con_offset: ExynosAuto SoC-specific EINT register or interrupt offset of bank.
* @eint_mask_offset: ExynosAuto SoC-specific EINT mask register offset of bank.
* @eint_pend_offset: ExynosAuto SoC-specific EINT pend register offset of bank.
* @eint_fltcon_offset: GS101 SoC-specific EINT filter config register offset.
* @name: name to be prefixed for each pin in this pin bank.
* @id: id of the bank, propagated to the pin range.
* @pin_base: starting pin number of the bank.
@@ -201,6 +204,7 @@ struct samsung_pin_bank {
u32 eint_con_offset;
u32 eint_mask_offset;
u32 eint_pend_offset;
u32 eint_fltcon_offset;
const char *name;
u32 id;

View File

@@ -302,11 +302,17 @@ static struct airq_info *new_airq_info(int index)
static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
u64 *first, void **airq_info)
{
int i, j;
int i, j, queue_idx, highest_queue_idx = -1;
struct airq_info *info;
unsigned long *indicator_addr = NULL;
unsigned long bit, flags;
/* Array entries without an actual queue pointer must be ignored. */
for (i = 0; i < nvqs; i++) {
if (vqs[i])
highest_queue_idx++;
}
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
mutex_lock(&airq_areas_lock);
if (!airq_areas[i])
@@ -316,7 +322,7 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
if (!info)
return NULL;
write_lock_irqsave(&info->lock, flags);
bit = airq_iv_alloc(info->aiv, nvqs);
bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1);
if (bit == -1UL) {
/* Not enough vacancies. */
write_unlock_irqrestore(&info->lock, flags);
@@ -325,8 +331,10 @@ static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
*first = bit;
*airq_info = info;
indicator_addr = info->aiv->vector;
for (j = 0; j < nvqs; j++) {
airq_iv_set_ptr(info->aiv, bit + j,
for (j = 0, queue_idx = 0; j < nvqs; j++) {
if (!vqs[j])
continue;
airq_iv_set_ptr(info->aiv, bit + queue_idx++,
(unsigned long)vqs[j]);
}
write_unlock_irqrestore(&info->lock, flags);

View File

@@ -65,7 +65,7 @@
#define LVTS_HW_FILTER 0x0
#define LVTS_TSSEL_CONF 0x13121110
#define LVTS_CALSCALE_CONF 0x300
#define LVTS_MONINT_CONF 0x8300318C
#define LVTS_MONINT_CONF 0x0300318C
#define LVTS_MONINT_OFFSET_SENSOR0 0xC
#define LVTS_MONINT_OFFSET_SENSOR1 0x180
@@ -91,8 +91,6 @@
#define LVTS_MSR_READ_TIMEOUT_US 400
#define LVTS_MSR_READ_WAIT_US (LVTS_MSR_READ_TIMEOUT_US / 2)
#define LVTS_HW_TSHUT_TEMP 105000
#define LVTS_MINIMUM_THRESHOLD 20000
static int golden_temp = LVTS_GOLDEN_TEMP_DEFAULT;
@@ -145,7 +143,6 @@ struct lvts_ctrl {
struct lvts_sensor sensors[LVTS_SENSOR_MAX];
const struct lvts_data *lvts_data;
u32 calibration[LVTS_SENSOR_MAX];
u32 hw_tshut_raw_temp;
u8 valid_sensor_mask;
int mode;
void __iomem *base;
@@ -837,14 +834,6 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
*/
lvts_ctrl[i].mode = lvts_data->lvts_ctrl[i].mode;
/*
* The temperature to raw temperature must be done
* after initializing the calibration.
*/
lvts_ctrl[i].hw_tshut_raw_temp =
lvts_temp_to_raw(LVTS_HW_TSHUT_TEMP,
lvts_data->temp_factor);
lvts_ctrl[i].low_thresh = INT_MIN;
lvts_ctrl[i].high_thresh = INT_MIN;
}
@@ -860,6 +849,32 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
return 0;
}
/*
 * Enable or disable the filtered-mode sensing points of one LVTS
 * controller.
 *
 * No-op unless the controller runs in LVTS_MSR_FILTERED_MODE. When
 * @enable is true, a bitmap covering every valid sensor of the controller
 * is built; when false, an empty map is written so all sensing points are
 * turned off. The map is written to MONCTL0 together with BIT(9)
 * ("single point access flow" per the comment below). @dev is currently
 * unused by the function body.
 */
static void lvts_ctrl_monitor_enable(struct device *dev, struct lvts_ctrl *lvts_ctrl, bool enable)
{
	/*
	 * Bitmaps to enable each sensor on filtered mode in the MONCTL0
	 * register.
	 */
	static const u8 sensor_filt_bitmap[] = { BIT(0), BIT(1), BIT(2), BIT(3) };
	u32 sensor_map = 0;
	int i;

	if (lvts_ctrl->mode != LVTS_MSR_FILTERED_MODE)
		return;

	if (enable) {
		lvts_for_each_valid_sensor(i, lvts_ctrl)
			sensor_map |= sensor_filt_bitmap[i];
	}

	/*
	 * Bits:
	 * 9: Single point access flow
	 * 0-3: Enable sensing point 0-3
	 */
	writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
}
/*
* At this point the configuration register is the only place in the
* driver where we write multiple values. Per hardware constraint,
@@ -893,7 +908,6 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
* 10 : Selected sensor with bits 19-18
* 11 : Reserved
*/
writel(BIT(16), LVTS_PROTCTL(lvts_ctrl->base));
/*
* LVTS_PROTTA : Stage 1 temperature threshold
@@ -906,8 +920,8 @@ static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
*
* writel(0x0, LVTS_PROTTA(lvts_ctrl->base));
* writel(0x0, LVTS_PROTTB(lvts_ctrl->base));
* writel(0x0, LVTS_PROTTC(lvts_ctrl->base));
*/
writel(lvts_ctrl->hw_tshut_raw_temp, LVTS_PROTTC(lvts_ctrl->base));
/*
* LVTS_MONINT : Interrupt configuration register
@@ -1381,8 +1395,11 @@ static int lvts_suspend(struct device *dev)
lvts_td = dev_get_drvdata(dev);
for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
lvts_ctrl_monitor_enable(dev, &lvts_td->lvts_ctrl[i], false);
usleep_range(100, 200);
lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], false);
}
clk_disable_unprepare(lvts_td->clk);
@@ -1400,8 +1417,11 @@ static int lvts_resume(struct device *dev)
if (ret)
return ret;
for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], true);
usleep_range(100, 200);
lvts_ctrl_monitor_enable(dev, &lvts_td->lvts_ctrl[i], true);
}
return 0;
}

View File

@@ -386,6 +386,7 @@ static const struct tsadc_table rk3328_code_table[] = {
{296, -40000},
{304, -35000},
{313, -30000},
{322, -25000},
{331, -20000},
{340, -15000},
{349, -10000},

View File

@@ -675,7 +675,7 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(xen_free_ballooned_pages);
static void __init balloon_add_regions(void)
static int __init balloon_add_regions(void)
{
unsigned long start_pfn, pages;
unsigned long pfn, extra_pfn_end;
@@ -698,26 +698,38 @@ static void __init balloon_add_regions(void)
for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
balloon_append(pfn_to_page(pfn));
balloon_stats.total_pages += extra_pfn_end - start_pfn;
/*
* Extra regions are accounted for in the physmap, but need
* decreasing from current_pages to balloon down the initial
* allocation, because they are already accounted for in
* total_pages.
*/
if (extra_pfn_end - start_pfn >= balloon_stats.current_pages) {
WARN(1, "Extra pages underflow current target");
return -ERANGE;
}
balloon_stats.current_pages -= extra_pfn_end - start_pfn;
}
return 0;
}
static int __init balloon_init(void)
{
struct task_struct *task;
int rc;
if (!xen_domain())
return -ENODEV;
pr_info("Initialising balloon driver\n");
#ifdef CONFIG_XEN_PV
balloon_stats.current_pages = xen_pv_domain()
? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
: get_num_physpages();
#else
balloon_stats.current_pages = get_num_physpages();
#endif
if (xen_released_pages >= get_num_physpages()) {
WARN(1, "Released pages underflow current target");
return -ERANGE;
}
balloon_stats.current_pages = get_num_physpages() - xen_released_pages;
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
@@ -734,7 +746,9 @@ static int __init balloon_init(void)
register_sysctl_init("xen/balloon", balloon_table);
#endif
balloon_add_regions();
rc = balloon_add_regions();
if (rc)
return rc;
task = kthread_run(balloon_thread, NULL, "xen-balloon");
if (IS_ERR(task)) {

View File

@@ -741,6 +741,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
read_lock_bh(&ls->ls_rsbtbl_lock);
if (!rsb_flag(r, RSB_HASHED)) {
read_unlock_bh(&ls->ls_rsbtbl_lock);
error = -EBADR;
goto do_new;
}
@@ -784,6 +785,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
}
} else {
write_unlock_bh(&ls->ls_rsbtbl_lock);
error = -EBADR;
goto do_new;
}

View File

@@ -605,7 +605,7 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
return status;
status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status);
if (status)
if (unlikely(status || cb->cb_status))
return status;
if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
return -NFSERR_BAD_XDR;

View File

@@ -2244,8 +2244,14 @@ static __net_init int nfsd_net_init(struct net *net)
NFSD_STATS_COUNTERS_NUM);
if (retval)
goto out_repcache_error;
memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
nn->nfsd_svcstats.program = &nfsd_programs[0];
if (!nfsd_proc_stat_init(net)) {
retval = -ENOMEM;
goto out_proc_error;
}
for (i = 0; i < sizeof(nn->nfsd_versions); i++)
nn->nfsd_versions[i] = nfsd_support_version(i);
for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++)
@@ -2255,12 +2261,13 @@ static __net_init int nfsd_net_init(struct net *net)
nfsd4_init_leases_net(nn);
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
nfsd_proc_stat_init(net);
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
INIT_LIST_HEAD(&nn->local_clients);
#endif
return 0;
out_proc_error:
percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
out_repcache_error:
nfsd_idmap_shutdown(net);
out_idmap_error:

View File

@@ -73,11 +73,11 @@ static int nfsd_show(struct seq_file *seq, void *v)
DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
void nfsd_proc_stat_init(struct net *net)
struct proc_dir_entry *nfsd_proc_stat_init(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
return svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
}
void nfsd_proc_stat_shutdown(struct net *net)

View File

@@ -10,7 +10,7 @@
#include <uapi/linux/nfsd/stats.h>
#include <linux/percpu_counter.h>
void nfsd_proc_stat_init(struct net *net);
struct proc_dir_entry *nfsd_proc_stat_init(struct net *net);
void nfsd_proc_stat_shutdown(struct net *net);
static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)

View File

@@ -1722,6 +1722,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
/* Grab netns reference for this server. */
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
tcp_ses->sign = ctx->sign;
tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
tcp_ses->noblockcnt = ctx->rootfs;
tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;

View File

@@ -1287,6 +1287,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->closetimeo = HZ * result.uint_32;
break;
case Opt_echo_interval:
if (result.uint_32 < SMB_ECHO_INTERVAL_MIN ||
result.uint_32 > SMB_ECHO_INTERVAL_MAX) {
cifs_errorf(fc, "echo interval is out of bounds\n");
goto cifs_parse_mount_err;
}
ctx->echo_interval = result.uint_32;
break;
case Opt_snapshot:

View File

@@ -1206,6 +1206,16 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
cifs_create_junction_fattr(fattr, sb);
goto out;
}
/*
* If the reparse point is unsupported by the Linux SMB
* client then let it process by the SMB server. So mask
* the -EOPNOTSUPP error code. This will allow Linux SMB
* client to send SMB OPEN request to server. If server
* does not support this reparse point too then server
* will return error during open the path.
*/
if (rc == -EOPNOTSUPP)
rc = 0;
}
break;
}

View File

@@ -633,8 +633,6 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
const char *full_path,
bool unicode, struct cifs_open_info_data *data)
{
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
data->reparse.buf = buf;
/* See MS-FSCC 2.1.2 */
@@ -658,8 +656,6 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
}
return 0;
default:
cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n",
le32_to_cpu(buf->ReparseTag));
return -EOPNOTSUPP;
}
}

View File

@@ -550,6 +550,13 @@ cifs_ses_add_channel(struct cifs_ses *ses,
ctx->sockopt_tcp_nodelay = ses->server->tcp_nodelay;
ctx->echo_interval = ses->server->echo_interval / HZ;
ctx->max_credits = ses->server->max_credits;
ctx->min_offload = ses->server->min_offload;
ctx->compress = ses->server->compression.requested;
ctx->dfs_conn = ses->server->dfs_conn;
ctx->ignore_signature = ses->server->ignore_signature;
ctx->leaf_fullpath = ses->server->leaf_fullpath;
ctx->rootfs = ses->server->noblockcnt;
ctx->retrans = ses->server->retrans;
/*
* This will be used for encoding/decoding user/domain/pw

View File

@@ -816,11 +816,12 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
spin_unlock(&cifs_tcp_ses_lock);
if (tcon->ses)
if (tcon->ses) {
server = tcon->ses->server;
cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
tcon->tid, persistent_fid, volatile_fid);
cifs_server_dbg(FYI,
"tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
tcon->tid, persistent_fid, volatile_fid);
}
return 0;
}

View File

@@ -395,32 +395,6 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
goto out;
/*
* If it's already released don't get it. This avoids to loop
* in __get_user_pages if userfaultfd_release waits on the
* caller of handle_userfault to release the mmap_lock.
*/
if (unlikely(READ_ONCE(ctx->released))) {
/*
* Don't return VM_FAULT_SIGBUS in this case, so a non
* cooperative manager can close the uffd after the
* last UFFDIO_COPY, without risking to trigger an
* involuntary SIGBUS if the process was starting the
* userfaultfd while the userfaultfd was still armed
* (but after the last UFFDIO_COPY). If the uffd
* wasn't already closed when the userfault reached
* this point, that would normally be solved by
* userfaultfd_must_wait returning 'false'.
*
* If we were to return VM_FAULT_SIGBUS here, the non
* cooperative manager would be instead forced to
* always call UFFDIO_UNREGISTER before it can safely
* close the uffd.
*/
ret = VM_FAULT_NOPAGE;
goto out;
}
/*
* Check that we can return VM_FAULT_RETRY.
*
@@ -457,6 +431,31 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out;
if (unlikely(READ_ONCE(ctx->released))) {
/*
* If a concurrent release is detected, do not return
* VM_FAULT_SIGBUS or VM_FAULT_NOPAGE, but instead always
* return VM_FAULT_RETRY with lock released proactively.
*
* If we were to return VM_FAULT_SIGBUS here, the non
* cooperative manager would be instead forced to
* always call UFFDIO_UNREGISTER before it can safely
* close the uffd, to avoid involuntary SIGBUS triggered.
*
* If we were to return VM_FAULT_NOPAGE, it would work for
* the fault path, in which the lock will be released
* later. However for GUP, faultin_page() does nothing
* special on NOPAGE, so GUP would spin retrying without
* releasing the mmap read lock, causing possible livelock.
*
* Here only VM_FAULT_RETRY would make sure the mmap lock
* be released immediately, so that the thread concurrently
* releasing the userfault would always make progress.
*/
release_fault_lock(vmf);
goto out;
}
/* take the reference before dropping the mmap_lock */
userfaultfd_ctx_get(ctx);

View File

@@ -2356,7 +2356,7 @@ static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);

View File

@@ -1121,6 +1121,12 @@ static inline bool is_page_hwpoison(const struct page *page)
return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
}
static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
{
return folio_test_hwpoison(folio) ||
(folio_test_large(folio) && folio_test_has_hwpoisoned(folio));
}
bool is_free_buddy_page(const struct page *page);
PAGEFLAG(Isolated, isolated, PF_ANY);

View File

@@ -201,10 +201,14 @@ static inline int pmd_dirty(pmd_t pmd)
* hazard could result in the direct mode hypervisor case, since the actual
* write to the page tables may not yet have taken place, so reads though
* a raw PTE pointer after it has been modified are not guaranteed to be
* up to date. This mode can only be entered and left under the protection of
* the page table locks for all page tables which may be modified. In the UP
* case, this is required so that preemption is disabled, and in the SMP case,
* it must synchronize the delayed page table writes properly on other CPUs.
* up to date.
*
* In the general case, no lock is guaranteed to be held between entry and exit
* of the lazy mode. So the implementation must assume preemption may be enabled
* and cpu migration is possible; it must take steps to be robust against this.
* (In practice, for user PTE updates, the appropriate page table lock(s) are
* held, but for kernel PTE updates, no lock is held). Nesting is not permitted
* and the mode cannot be used in interrupt context.
*/
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode() do {} while (0)
@@ -266,7 +270,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
{
page_table_check_ptes_set(mm, ptep, pte, nr);
arch_enter_lazy_mmu_mode();
for (;;) {
set_pte(ptep, pte);
if (--nr == 0)
@@ -274,7 +277,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
ptep++;
pte = pte_next_pfn(pte);
}
arch_leave_lazy_mmu_mode();
}
#endif
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)

View File

@@ -775,6 +775,7 @@ struct sctp_transport {
/* Reference counting. */
refcount_t refcnt;
__u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0,
@@ -784,7 +785,7 @@ struct sctp_transport {
* calculation completes (i.e. the DATA chunk
* is SACK'd) clear this flag.
*/
__u32 rto_pending:1,
rto_pending:1,
/*
* hb_sent : a flag that signals that we have a pending

View File

@@ -57,9 +57,11 @@ struct landlock_ruleset_attr {
*
* - %LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI
* version.
* - %LANDLOCK_CREATE_RULESET_ERRATA: Get a bitmask of fixed issues.
*/
/* clang-format off */
#define LANDLOCK_CREATE_RULESET_VERSION (1U << 0)
#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1)
/* clang-format on */
/**

View File

@@ -73,13 +73,20 @@ typedef enum {
SEV_RET_INVALID_PARAM,
SEV_RET_RESOURCE_LIMIT,
SEV_RET_SECURE_DATA_INVALID,
SEV_RET_INVALID_KEY = 0x27,
SEV_RET_INVALID_PAGE_SIZE,
SEV_RET_INVALID_PAGE_STATE,
SEV_RET_INVALID_MDATA_ENTRY,
SEV_RET_INVALID_PAGE_OWNER,
SEV_RET_INVALID_PAGE_AEAD_OFLOW,
SEV_RET_RMP_INIT_REQUIRED,
SEV_RET_INVALID_PAGE_SIZE = 0x0019,
SEV_RET_INVALID_PAGE_STATE = 0x001A,
SEV_RET_INVALID_MDATA_ENTRY = 0x001B,
SEV_RET_INVALID_PAGE_OWNER = 0x001C,
SEV_RET_AEAD_OFLOW = 0x001D,
SEV_RET_EXIT_RING_BUFFER = 0x001F,
SEV_RET_RMP_INIT_REQUIRED = 0x0020,
SEV_RET_BAD_SVN = 0x0021,
SEV_RET_BAD_VERSION = 0x0022,
SEV_RET_SHUTDOWN_REQUIRED = 0x0023,
SEV_RET_UPDATE_FAILED = 0x0024,
SEV_RET_RESTORE_REQUIRED = 0x0025,
SEV_RET_RMP_INITIALIZATION_FAILED = 0x0026,
SEV_RET_INVALID_KEY = 0x0027,
SEV_RET_MAX,
} sev_ret_code;

View File

@@ -4178,8 +4178,8 @@ static struct scx_dispatch_q *create_dsq(u64 dsq_id, int node)
init_dsq(dsq, dsq_id);
ret = rhashtable_insert_fast(&dsq_hash, &dsq->hash_node,
dsq_hash_params);
ret = rhashtable_lookup_insert_fast(&dsq_hash, &dsq->hash_node,
dsq_hash_params);
if (ret) {
kfree(dsq);
return ERR_PTR(ret);

View File

@@ -3523,16 +3523,16 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int
ftrace_hash_empty(subops->func_hash->notrace_hash)) {
notrace_hash = EMPTY_HASH;
} else {
size_bits = max(ops->func_hash->filter_hash->size_bits,
subops->func_hash->filter_hash->size_bits);
size_bits = max(ops->func_hash->notrace_hash->size_bits,
subops->func_hash->notrace_hash->size_bits);
notrace_hash = alloc_ftrace_hash(size_bits);
if (!notrace_hash) {
free_ftrace_hash(filter_hash);
return -ENOMEM;
}
ret = intersect_hash(&notrace_hash, ops->func_hash->filter_hash,
subops->func_hash->filter_hash);
ret = intersect_hash(&notrace_hash, ops->func_hash->notrace_hash,
subops->func_hash->notrace_hash);
if (ret < 0) {
free_ftrace_hash(filter_hash);
free_ftrace_hash(notrace_hash);
@@ -6848,6 +6848,7 @@ ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
}
}
}
cond_resched();
} while_for_each_ftrace_rec();
out:
mutex_unlock(&ftrace_lock);

View File

@@ -6054,7 +6054,7 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
meta->read = cpu_buffer->read;
/* Some archs do not have data cache coherency between kernel and user-space */
flush_dcache_folio(virt_to_folio(cpu_buffer->meta_page));
flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE);
}
static void
@@ -7432,7 +7432,8 @@ consume:
out:
/* Some archs do not have data cache coherency between kernel and user-space */
flush_dcache_folio(virt_to_folio(cpu_buffer->reader_page->page));
flush_kernel_vmap_range(cpu_buffer->reader_page->page,
buffer->subbuf_size + BUF_PAGE_HDR_SIZE);
rb_update_meta_page(cpu_buffer);

View File

@@ -377,7 +377,6 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
union trace_synth_field *data = &entry->fields[n_u64];
trace_seq_printf(s, print_fmt, se->fields[i]->name,
STR_VAR_LEN_MAX,
(char *)entry + data->as_dynamic.offset,
i == se->n_fields - 1 ? "" : " ");
n_u64++;

View File

@@ -888,9 +888,15 @@ static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mo
struct __find_tracepoint_cb_data *data = priv;
if (!data->tpoint && !strcmp(data->tp_name, tp->name)) {
data->tpoint = tp;
if (!data->mod)
/* If module is not specified, try getting module refcount. */
if (!data->mod && mod) {
/* If failed to get refcount, ignore this tracepoint. */
if (!try_module_get(mod))
return;
data->mod = mod;
}
data->tpoint = tp;
}
}
@@ -902,7 +908,11 @@ static void __find_tracepoint_cb(struct tracepoint *tp, void *priv)
data->tpoint = tp;
}
/* Find a tracepoint from kernel and module. */
/*
* Find a tracepoint from kernel and module. If the tracepoint is on the module,
* the module's refcount is incremented and returned as *@tp_mod. Thus, if it is
* not NULL, caller must call module_put(*tp_mod) after used the tracepoint.
*/
static struct tracepoint *find_tracepoint(const char *tp_name,
struct module **tp_mod)
{
@@ -931,7 +941,10 @@ static void reenable_trace_fprobe(struct trace_fprobe *tf)
}
}
/* Find a tracepoint from specified module. */
/*
* Find a tracepoint from specified module. In this case, this does not get the
* module's refcount. The caller must ensure the module is not freed.
*/
static struct tracepoint *find_tracepoint_in_module(struct module *mod,
const char *tp_name)
{
@@ -1167,11 +1180,6 @@ static int __trace_fprobe_create(int argc, const char *argv[])
if (is_tracepoint) {
ctx.flags |= TPARG_FL_TPOINT;
tpoint = find_tracepoint(symbol, &tp_mod);
/* lock module until register this tprobe. */
if (tp_mod && !try_module_get(tp_mod)) {
tpoint = NULL;
tp_mod = NULL;
}
if (tpoint) {
ctx.funcname = kallsyms_lookup(
(unsigned long)tpoint->probestub,

View File

@@ -24,7 +24,7 @@ struct folio *damon_get_folio(unsigned long pfn)
struct page *page = pfn_to_online_page(pfn);
struct folio *folio;
if (!page || PageTail(page))
if (!page)
return NULL;
folio = page_folio(page);

View File

@@ -264,11 +264,14 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
damos_add_filter(s, filter);
}
for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
addr = r->ar.start;
while (addr < r->ar.end) {
struct folio *folio = damon_get_folio(PHYS_PFN(addr));
if (!folio)
if (!folio) {
addr += PAGE_SIZE;
continue;
}
if (damos_pa_filter_out(s, folio))
goto put_folio;
@@ -282,6 +285,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
else
list_add(&folio->lru, &folio_list);
put_folio:
addr += folio_size(folio);
folio_put(folio);
}
if (install_young_filter)
@@ -296,11 +300,14 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
{
unsigned long addr, applied = 0;
for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
addr = r->ar.start;
while (addr < r->ar.end) {
struct folio *folio = damon_get_folio(PHYS_PFN(addr));
if (!folio)
if (!folio) {
addr += PAGE_SIZE;
continue;
}
if (damos_pa_filter_out(s, folio))
goto put_folio;
@@ -311,6 +318,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
folio_deactivate(folio);
applied += folio_nr_pages(folio);
put_folio:
addr += folio_size(folio);
folio_put(folio);
}
return applied * PAGE_SIZE;
@@ -454,11 +462,14 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s)
unsigned long addr, applied;
LIST_HEAD(folio_list);
for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
addr = r->ar.start;
while (addr < r->ar.end) {
struct folio *folio = damon_get_folio(PHYS_PFN(addr));
if (!folio)
if (!folio) {
addr += PAGE_SIZE;
continue;
}
if (damos_pa_filter_out(s, folio))
goto put_folio;
@@ -467,6 +478,7 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s)
goto put_folio;
list_add(&folio->lru, &folio_list);
put_folio:
addr += folio_size(folio);
folio_put(folio);
}
applied = damon_pa_migrate_pages(&folio_list, s->target_nid);

View File

@@ -4863,7 +4863,7 @@ static struct ctl_table hugetlb_table[] = {
},
};
static void hugetlb_sysctl_init(void)
static void __init hugetlb_sysctl_init(void)
{
register_sysctl_init("vm", hugetlb_table);
}

View File

@@ -879,12 +879,17 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
mmap_read_lock(p->mm);
ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
(void *)&priv);
/*
* ret = 1 when CMCI wins, regardless of whether try_to_unmap()
* succeeds or fails, then kill the process with SIGBUS.
* ret = 0 when poison page is a clean page and it's dropped, no
* SIGBUS is needed.
*/
if (ret == 1 && priv.tk.addr)
kill_proc(&priv.tk, pfn, flags);
else
ret = 0;
mmap_read_unlock(p->mm);
return ret > 0 ? -EHWPOISON : -EFAULT;
return ret > 0 ? -EHWPOISON : 0;
}
/*

View File

@@ -1801,8 +1801,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
if (unlikely(page_folio(page) != folio))
goto put_folio;
if (folio_test_hwpoison(folio) ||
(folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
if (folio_contain_hwpoisoned_page(folio)) {
if (WARN_ON(folio_test_lru(folio)))
folio_isolate_lru(folio);
if (folio_mapped(folio)) {

View File

@@ -705,8 +705,8 @@ static unsigned long move_vma(struct vm_area_struct *vma,
unsigned long vm_flags = vma->vm_flags;
unsigned long new_pgoff;
unsigned long moved_len;
unsigned long account_start = 0;
unsigned long account_end = 0;
bool account_start = false;
bool account_end = false;
unsigned long hiwater_vm;
int err = 0;
bool need_rmap_locks;
@@ -790,9 +790,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
vm_flags_clear(vma, VM_ACCOUNT);
if (vma->vm_start < old_addr)
account_start = vma->vm_start;
account_start = true;
if (vma->vm_end > old_addr + old_len)
account_end = vma->vm_end;
account_end = true;
}
/*
@@ -832,7 +832,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
/* OOM: unable to split vma, just get accounts right */
if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
vm_acct_memory(old_len >> PAGE_SHIFT);
account_start = account_end = 0;
account_start = account_end = false;
}
if (vm_flags & VM_LOCKED) {

View File

@@ -84,6 +84,7 @@ again:
* mapped at the @pvmw->pte
* @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
* for checking
* @pte_nr: the number of small pages described by @pvmw->pte.
*
* page_vma_mapped_walk() found a place where pfn range is *potentially*
* mapped. check_pte() has to validate this.
@@ -100,7 +101,7 @@ again:
* Otherwise, return false.
*
*/
static bool check_pte(struct page_vma_mapped_walk *pvmw)
static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
{
unsigned long pfn;
pte_t ptent = ptep_get(pvmw->pte);
@@ -133,7 +134,11 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
pfn = pte_pfn(ptent);
}
return (pfn - pvmw->pfn) < pvmw->nr_pages;
if ((pfn + pte_nr - 1) < pvmw->pfn)
return false;
if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
return false;
return true;
}
/* Returns true if the two ranges overlap. Careful to not overflow. */
@@ -208,7 +213,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
return false;
pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
if (!check_pte(pvmw))
if (!check_pte(pvmw, pages_per_huge_page(hstate)))
return not_found(pvmw);
return true;
}
@@ -291,7 +296,7 @@ restart:
goto next_pte;
}
this_pte:
if (check_pte(pvmw))
if (check_pte(pvmw, 1))
return true;
next_pte:
do {

View File

@@ -2494,7 +2494,7 @@ static bool folio_make_device_exclusive(struct folio *folio,
* Restrict to anonymous folios for now to avoid potential writeback
* issues.
*/
if (!folio_test_anon(folio))
if (!folio_test_anon(folio) || folio_test_hugetlb(folio))
return false;
rmap_walk(folio, &rwc);

View File

@@ -3069,8 +3069,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
if (ret)
return ret;
if (folio_test_hwpoison(folio) ||
(folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) {
if (folio_contain_hwpoisoned_page(folio)) {
folio_unlock(folio);
folio_put(folio);
return -EIO;

View File

@@ -7742,7 +7742,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
return NODE_RECLAIM_NOSCAN;
ret = __node_reclaim(pgdat, gfp_mask, order);
clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
if (ret)
count_vm_event(PGSCAN_ZONE_RECLAIM_SUCCESS);

View File

@@ -72,8 +72,9 @@
/* Forward declarations for internal helper functions. */
static bool sctp_writeable(const struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
struct sctp_transport *transport,
long *timeo_p, size_t msg_len);
static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
static int sctp_wait_for_accept(struct sock *sk, long timeo);
@@ -1828,7 +1829,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
err = sctp_wait_for_sndbuf(asoc, transport, &timeo, msg_len);
if (err)
goto err;
if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
@@ -9214,8 +9215,9 @@ void sctp_sock_rfree(struct sk_buff *skb)
/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len)
static int sctp_wait_for_sndbuf(struct sctp_association *asoc,
struct sctp_transport *transport,
long *timeo_p, size_t msg_len)
{
struct sock *sk = asoc->base.sk;
long current_timeo = *timeo_p;
@@ -9225,7 +9227,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
*timeo_p, msg_len);
/* Increment the association's refcnt. */
/* Increment the transport and association's refcnt. */
if (transport)
sctp_transport_hold(transport);
sctp_association_hold(asoc);
/* Wait on the association specific sndbuf space. */
@@ -9234,7 +9238,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
TASK_INTERRUPTIBLE);
if (asoc->base.dead)
goto do_dead;
if (!*timeo_p)
if ((!*timeo_p) || (transport && transport->dead))
goto do_nonblock;
if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
goto do_error;
@@ -9259,7 +9263,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
out:
finish_wait(&asoc->wait, &wait);
/* Release the association's refcnt. */
/* Release the transport and association's refcnt. */
if (transport)
sctp_transport_put(transport);
sctp_association_put(asoc);
return err;

View File

@@ -117,6 +117,8 @@ fail:
*/
void sctp_transport_free(struct sctp_transport *transport)
{
transport->dead = 1;
/* Try to delete the heartbeat timer. */
if (del_timer(&transport->hb_timer))
sctp_transport_put(transport);

View File

@@ -0,0 +1,99 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Landlock - Errata information
*
* Copyright © 2025 Microsoft Corporation
*/
#ifndef _SECURITY_LANDLOCK_ERRATA_H
#define _SECURITY_LANDLOCK_ERRATA_H
#include <linux/init.h>
struct landlock_erratum {
const int abi;
const u8 number;
};
/* clang-format off */
#define LANDLOCK_ERRATUM(NUMBER) \
{ \
.abi = LANDLOCK_ERRATA_ABI, \
.number = NUMBER, \
},
/* clang-format on */
/*
* Some fixes may require user space to check if they are applied on the running
* kernel before using a specific feature. For instance, this applies when a
* restriction was previously too restrictive and is now getting relaxed (for
* compatibility or semantic reasons). However, non-visible changes for
* legitimate use (e.g. security fixes) do not require an erratum.
*/
static const struct landlock_erratum landlock_errata_init[] __initconst = {
/*
* Only Sparse may not implement __has_include. If a compiler does not
* implement __has_include, a warning will be printed at boot time (see
* setup.c).
*/
#ifdef __has_include
#define LANDLOCK_ERRATA_ABI 1
#if __has_include("errata/abi-1.h")
#include "errata/abi-1.h"
#endif
#undef LANDLOCK_ERRATA_ABI
#define LANDLOCK_ERRATA_ABI 2
#if __has_include("errata/abi-2.h")
#include "errata/abi-2.h"
#endif
#undef LANDLOCK_ERRATA_ABI
#define LANDLOCK_ERRATA_ABI 3
#if __has_include("errata/abi-3.h")
#include "errata/abi-3.h"
#endif
#undef LANDLOCK_ERRATA_ABI
#define LANDLOCK_ERRATA_ABI 4
#if __has_include("errata/abi-4.h")
#include "errata/abi-4.h"
#endif
#undef LANDLOCK_ERRATA_ABI
#define LANDLOCK_ERRATA_ABI 5
#if __has_include("errata/abi-5.h")
#include "errata/abi-5.h"
#endif
#undef LANDLOCK_ERRATA_ABI
#define LANDLOCK_ERRATA_ABI 6
#if __has_include("errata/abi-6.h")
#include "errata/abi-6.h"
#endif
#undef LANDLOCK_ERRATA_ABI
/*
* For each new erratum, we need to include all the ABI files up to the impacted
* ABI to make all potential future intermediate errata easy to backport.
*
* If such change involves more than one ABI addition, then it must be in a
* dedicated commit with the same Fixes tag as used for the actual fix.
*
* Each commit creating a new security/landlock/errata/abi-*.h file must have a
* Depends-on tag to reference the commit that previously added the line to
* include this new file, except if the original Fixes tag is enough.
*
* Each erratum must be documented in its related ABI file, and a dedicated
* commit must update Documentation/userspace-api/landlock.rst to include this
* erratum. This commit will not be backported.
*/
#endif
{}
};
#endif /* _SECURITY_LANDLOCK_ERRATA_H */

View File

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/**
* DOC: erratum_1
*
* Erratum 1: TCP socket identification
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This fix addresses an issue where IPv4 and IPv6 stream sockets (e.g., SMC,
* MPTCP, or SCTP) were incorrectly restricted by TCP access rights during
* :manpage:`bind(2)` and :manpage:`connect(2)` operations. This change ensures
* that only TCP sockets are subject to TCP access rights, allowing other
* protocols to operate without unnecessary restrictions.
*/
LANDLOCK_ERRATUM(1)

View File

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/**
* DOC: erratum_2
*
* Erratum 2: Scoped signal handling
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This fix addresses an issue where signal scoping was overly restrictive,
* preventing sandboxed threads from signaling other threads within the same
* process if they belonged to different domains. Because threads are not
* security boundaries, user space might assume that any thread within the same
* process can send signals between themselves (see :manpage:`nptl(7)` and
* :manpage:`libpsx(3)`). Consistent with :manpage:`ptrace(2)` behavior, direct
* interaction between threads of the same process should always be allowed.
* This change ensures that any thread is allowed to send signals to any other
* thread within the same process, regardless of their domain.
*/
LANDLOCK_ERRATUM(2)

View File

@@ -27,7 +27,9 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
@@ -1623,21 +1625,46 @@ static int hook_file_ioctl_compat(struct file *file, unsigned int cmd,
return -EACCES;
}
static void hook_file_set_fowner(struct file *file)
/*
* Always allow sending signals between threads of the same process. This
* ensures consistency with hook_task_kill().
*/
static bool control_current_fowner(struct fown_struct *const fown)
{
struct landlock_ruleset *new_dom, *prev_dom;
struct task_struct *p;
/*
* Lock already held by __f_setown(), see commit 26f204380a3c ("fs: Fix
* file_set_fowner LSM hook inconsistencies").
*/
lockdep_assert_held(&file_f_owner(file)->lock);
new_dom = landlock_get_current_domain();
landlock_get_ruleset(new_dom);
lockdep_assert_held(&fown->lock);
/*
* Some callers (e.g. fcntl_dirnotify) may not be in an RCU read-side
* critical section.
*/
guard(rcu)();
p = pid_task(fown->pid, fown->pid_type);
if (!p)
return true;
return !same_thread_group(p, current);
}
static void hook_file_set_fowner(struct file *file)
{
struct landlock_ruleset *prev_dom;
struct landlock_ruleset *new_dom = NULL;
if (control_current_fowner(file_f_owner(file))) {
new_dom = landlock_get_current_domain();
landlock_get_ruleset(new_dom);
}
prev_dom = landlock_file(file)->fown_domain;
landlock_file(file)->fown_domain = new_dom;
/* Called in an RCU read-side critical section. */
/* May be called in an RCU read-side critical section. */
landlock_put_ruleset_deferred(prev_dom);
}

View File

@@ -6,12 +6,14 @@
* Copyright © 2018-2020 ANSSI
*/
#include <linux/bits.h>
#include <linux/init.h>
#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>
#include "common.h"
#include "cred.h"
#include "errata.h"
#include "fs.h"
#include "net.h"
#include "setup.h"
@@ -19,6 +21,11 @@
bool landlock_initialized __ro_after_init = false;
const struct lsm_id landlock_lsmid = {
.name = LANDLOCK_NAME,
.id = LSM_ID_LANDLOCK,
};
struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = {
.lbs_cred = sizeof(struct landlock_cred_security),
.lbs_file = sizeof(struct landlock_file_security),
@@ -26,13 +33,36 @@ struct lsm_blob_sizes landlock_blob_sizes __ro_after_init = {
.lbs_superblock = sizeof(struct landlock_superblock_security),
};
const struct lsm_id landlock_lsmid = {
.name = LANDLOCK_NAME,
.id = LSM_ID_LANDLOCK,
};
int landlock_errata __ro_after_init;
static void __init compute_errata(void)
{
size_t i;
#ifndef __has_include
/*
* This is a safeguard to make sure the compiler implements
* __has_include (see errata.h).
*/
WARN_ON_ONCE(1);
return;
#endif
for (i = 0; landlock_errata_init[i].number; i++) {
const int prev_errata = landlock_errata;
if (WARN_ON_ONCE(landlock_errata_init[i].abi >
landlock_abi_version))
continue;
landlock_errata |= BIT(landlock_errata_init[i].number - 1);
WARN_ON_ONCE(prev_errata == landlock_errata);
}
}
static int __init landlock_init(void)
{
compute_errata();
landlock_add_cred_hooks();
landlock_add_task_hooks();
landlock_add_fs_hooks();

View File

@@ -11,7 +11,10 @@
#include <linux/lsm_hooks.h>
extern const int landlock_abi_version;
extern bool landlock_initialized;
extern int landlock_errata;
extern struct lsm_blob_sizes landlock_blob_sizes;
extern const struct lsm_id landlock_lsmid;

View File

@@ -159,7 +159,9 @@ static const struct file_operations ruleset_fops = {
* the new ruleset.
* @size: Size of the pointed &struct landlock_ruleset_attr (needed for
* backward and forward compatibility).
* @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION.
* @flags: Supported value:
* - %LANDLOCK_CREATE_RULESET_VERSION
* - %LANDLOCK_CREATE_RULESET_ERRATA
*
* This system call enables to create a new Landlock ruleset, and returns the
* related file descriptor on success.
@@ -168,6 +170,10 @@ static const struct file_operations ruleset_fops = {
* 0, then the returned value is the highest supported Landlock ABI version
* (starting at 1).
*
* If @flags is %LANDLOCK_CREATE_RULESET_ERRATA and @attr is NULL and @size is
* 0, then the returned value is a bitmask of fixed issues for the current
* Landlock ABI version.
*
* Possible returned errors are:
*
* - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time;
@@ -191,9 +197,15 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
return -EOPNOTSUPP;
if (flags) {
if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr &&
!size)
return LANDLOCK_ABI_VERSION;
if (attr || size)
return -EINVAL;
if (flags == LANDLOCK_CREATE_RULESET_VERSION)
return landlock_abi_version;
if (flags == LANDLOCK_CREATE_RULESET_ERRATA)
return landlock_errata;
return -EINVAL;
}
@@ -234,6 +246,8 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
return ruleset_fd;
}
const int landlock_abi_version = LANDLOCK_ABI_VERSION;
/*
* Returns an owned ruleset from a FD. It is thus needed to call
* landlock_put_ruleset() on the return value.

View File

@@ -13,6 +13,7 @@
#include <linux/lsm_hooks.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <net/af_unix.h>
#include <net/sock.h>
@@ -264,6 +265,17 @@ static int hook_task_kill(struct task_struct *const p,
/* Dealing with USB IO. */
dom = landlock_cred(cred)->domain;
} else {
/*
* Always allow sending signals between threads of the same process.
* This is required for process credential changes by the Native POSIX
* Threads Library and implemented by the set*id(2) wrappers and
* libcap(3) with tgkill(2). See nptl(7) and libpsx(3).
*
* This exception is similar to the __ptrace_may_access() one.
*/
if (same_thread_group(p, current))
return 0;
dom = landlock_get_current_domain();
}
dom = landlock_get_applicable_domain(dom, signal_scope);

View File

@@ -867,8 +867,8 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
} pads[] = {
{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
};
int new_off, pad_bits, bits, i;
const char *pad_type;
int new_off = 0, pad_bits = 0, bits, i;
const char *pad_type = NULL;
if (cur_off >= next_off)
return; /* no gap */

View File

@@ -98,10 +98,54 @@ TEST(abi_version)
ASSERT_EQ(EINVAL, errno);
}
/*
* Old source trees might not have the set of Kselftest fixes related to kernel
* UAPI headers.
*/
#ifndef LANDLOCK_CREATE_RULESET_ERRATA
#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1)
#endif
TEST(errata)
{
const struct landlock_ruleset_attr ruleset_attr = {
.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
};
int errata;
errata = landlock_create_ruleset(NULL, 0,
LANDLOCK_CREATE_RULESET_ERRATA);
/* The errata bitmask will not be backported to tests. */
ASSERT_LE(0, errata);
TH_LOG("errata: 0x%x", errata);
ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0,
LANDLOCK_CREATE_RULESET_ERRATA));
ASSERT_EQ(EINVAL, errno);
ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr),
LANDLOCK_CREATE_RULESET_ERRATA));
ASSERT_EQ(EINVAL, errno);
ASSERT_EQ(-1,
landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr),
LANDLOCK_CREATE_RULESET_ERRATA));
ASSERT_EQ(EINVAL, errno);
ASSERT_EQ(-1, landlock_create_ruleset(
NULL, 0,
LANDLOCK_CREATE_RULESET_VERSION |
LANDLOCK_CREATE_RULESET_ERRATA));
ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0,
LANDLOCK_CREATE_RULESET_ERRATA |
1 << 31));
ASSERT_EQ(EINVAL, errno);
}
/* Tests ordering of syscall argument checks. */
TEST(create_ruleset_checks_ordering)
{
const int last_flag = LANDLOCK_CREATE_RULESET_VERSION;
const int last_flag = LANDLOCK_CREATE_RULESET_ERRATA;
const int invalid_flag = last_flag << 1;
int ruleset_fd;
const struct landlock_ruleset_attr ruleset_attr = {

Some files were not shown because too many files have changed in this diff Show More