Merge 5.10.240 into android12-5.10-lts

Changes in 5.10.240
	cifs: Fix cifs_query_path_info() for Windows NT servers
	NFSv4.2: fix listxattr to return selinux security label
	mailbox: Not protect module_put with spin_lock_irqsave
	mfd: max14577: Fix wakeup source leaks on device unbind
	leds: multicolor: Fix intensity setting while SW blinking
	hwmon: (pmbus/max34440) Fix support for max34451
	dmaengine: xilinx_dma: Set dma_device directions
	md/md-bitmap: fix dm-raid max_write_behind setting
	bcache: fix NULL pointer in cache_set_flush()
	iio: pressure: zpa2326: Use aligned_s64 for the timestamp
	um: Add cmpxchg8b_emu and checksum functions to asm-prototypes.h
	usb: potential integer overflow in usbg_make_tpg()
	usb: common: usb-conn-gpio: use a unique name for usb connector device
	usb: Add checks for snprintf() calls in usb_alloc_dev()
	usb: cdc-wdm: avoid setting WDM_READ for ZLP-s
	usb: typec: displayport: Receive DP Status Update NAK request exit dp altmode
	ALSA: hda: Ignore unsol events for cards being shut down
	ALSA: hda: Add new pci id for AMD GPU display HD audio controller
	ceph: fix possible integer overflow in ceph_zero_objects()
	ovl: Check for NULL d_inode() in ovl_dentry_upper()
	VMCI: check context->notify_page after call to get_user_pages_fast() to avoid GPF
	VMCI: fix race between vmci_host_setup_notify and vmci_ctx_unset_notify
	usb: typec: tcpci_maxim: Fix uninitialized return variable
	usb: typec: tcpci_maxim: remove redundant assignment
	usb: typec: tcpci_maxim: add terminating newlines to logging
	usb: typec: tcpm/tcpci_maxim: Fix bounds check in process_rx()
	fs/jfs: consolidate sanity checking in dbMount
	jfs: validate AG parameters in dbMount() to prevent crashes
	media: omap3isp: use sgtable-based scatterlist wrappers
	can: tcan4x5x: fix power regulator retrieval during probe
	f2fs: don't over-report free space or inodes in statvfs
	RDMA/core: Use refcount_t instead of atomic_t on refcount of iwcm_id_private
	RDMA/iwcm: Fix use-after-free of work objects after cm_id destruction
	uio: uio_hv_generic: use devm_kzalloc() for private data alloc
	Drivers: hv: vmbus: Fix duplicate CPU assignments within a device
	Drivers: hv: Rename 'alloced' to 'allocated'
	Drivers: hv: vmbus: Add utility function for querying ring size
	uio_hv_generic: Query the ringbuffer size for device
	uio_hv_generic: Align ring size to system page
	PCI: cadence-ep: Correct PBA offset in .set_msix() callback
	net_sched: sch_sfq: reject invalid perturb period
	i2c: tiny-usb: disable zero-length read messages
	i2c: robotfuzz-osif: disable zero-length read messages
	atm: clip: prevent NULL deref in clip_push()
	ALSA: usb-audio: Fix out-of-bounds read in snd_usb_get_audioformat_uac3()
	attach_recursive_mnt(): do not lock the covering tree when sliding something under it
	libbpf: Fix null pointer dereference in btf_dump__free on allocation failure
	wifi: mac80211: fix beacon interval calculation overflow
	vsock/uapi: fix linux/vm_sockets.h userspace compilation errors
	um: ubd: Add missing error check in start_io_thread()
	net: enetc: Correct endianness handling in _enetc_rd_reg64
	atm: Release atm_dev_mutex after removing procfs in atm_dev_deregister().
	dt-bindings: serial: 8250: Make clocks and clock-frequency exclusive
	Bluetooth: L2CAP: Fix L2CAP MTU negotiation
	dm-raid: fix variable in journal device check
	btrfs: update superblock's device bytes_used when dropping chunk
	HID: wacom: fix memory leak on kobject creation failure
	HID: wacom: fix memory leak on sysfs attribute creation failure
	HID: wacom: fix kobject reference count leak
	drm/tegra: Assign plane type before registration
	drm/tegra: Fix a possible null pointer dereference
	drm/udl: Unregister device before cleaning up on disconnect
	drm/amdkfd: Fix race in GWS queue scheduling
	drm/bridge: cdns-dsi: Fix the clock variable for mode_valid()
	drm/bridge: cdns-dsi: Fix connecting to next bridge
	drm/bridge: cdns-dsi: Check return value when getting default PHY config
	drm/bridge: cdns-dsi: Wait for Clk and Data Lanes to be ready
	PCI: hv: Do not set PCI_COMMAND_MEMORY to reduce VM boot time
	s390: Add '-std=gnu11' to decompressor and purgatory CFLAGS
	Revert "ipv6: save dontfrag in cork"
	arm64: Restrict pagetable teardown to avoid false warning
	rtc: cmos: use spin_lock_irqsave in cmos_interrupt
	vsock/vmci: Clear the vmci transport packet properly when initializing it
	mmc: sdhci: Add a helper function for dump register in dynamic debug mode
	Revert "mmc: sdhci: Disable SD card clock before changing parameters"
	usb: typec: altmodes/displayport: do not index invalid pin_assignments
	mtk-sd: Fix a pagefault in dma_unmap_sg() for not prepared data
	platform/mellanox: mlxbf-tmfifo: fix vring_desc.len assignment
	RDMA/mlx5: Initialize obj_event->obj_sub_list before xa_insert
	nfs: Clean up /proc/net/rpc/nfs when nfs_fs_proc_net_init() fails.
	NFSv4/pNFS: Fix a race to wake on NFS_LAYOUT_DRAIN
	scsi: qla2xxx: Fix DMA mapping test in qla24xx_get_port_database()
	scsi: qla4xxx: Fix missing DMA mapping error in qla4xxx_alloc_pdu()
	scsi: ufs: core: Fix spelling of a sysfs attribute name
	RDMA/mlx5: Fix CC counters query for MPV
	btrfs: fix missing error handling when searching for inode refs during log replay
	drm/exynos: fimd: Guard display clock control with runtime PM calls
	spi: spi-fsl-dspi: Clear completion counter before initiating transfer
	drm/i915/selftests: Change mock_request() to return error pointers
	drm/i915/gt: Fix timeline left held on VMA alloc error
	lib: test_objagg: Set error message in check_expect_hints_stats()
	amd-xgbe: align CL37 AN sequence as per databook
	enic: fix incorrect MTU comparison in enic_change_mtu()
	net: rose: Fix fall-through warnings for Clang
	rose: fix dangling neighbour pointers in rose_rt_device_down()
	niu: Fix dma_mapping_error() check
	net/sched: Always pass notifications when child class becomes empty
	ALSA: sb: Force to disable DMAs once when DMA mode is changed
	ata: pata_cs5536: fix build on 32-bit UML
	powerpc: Fix struct termio related ioctl macros
	scsi: target: Fix NULL pointer dereference in core_scsi3_decode_spec_i_port()
	wifi: mac80211: drop invalid source address OCB frames
	wifi: ath6kl: remove WARN on bad firmware input
	ACPICA: Refuse to evaluate a method if arguments are missing
	rcu: Return early if callback is not specified
	regulator: gpio: Fix the out-of-bounds access to drvdata::gpiods
	mmc: mediatek: use data instead of mrq parameter from msdc_{un}prepare_data()
	mtk-sd: Prevent memory corruption from DMA map failure
	mtk-sd: reset host->mrq on prepare_data() error
	drm/v3d: Disable interrupts before resetting the GPU
	RDMA/mlx5: Fix vport loopback for MPV device
	flexfiles/pNFS: update stats on NFS4ERR_DELAY for v4.1 DSes
	NFSv4/flexfiles: Fix handling of NFS level errors in I/O
	btrfs: propagate last_unlink_trans earlier when doing a rmdir
	btrfs: use btrfs_record_snapshot_destroy() during rmdir
	dpaa2-eth: rename dpaa2_eth_xdp_release_buf into dpaa2_eth_recycle_buf
	dpaa2-eth: Update dpni_get_single_step_cfg command
	dpaa2-eth: Update SINGLE_STEP register access
	net: dpaa2-eth: rearrange variable in dpaa2_eth_get_ethtool_stats
	dpaa2-eth: fix xdp_rxq_info leak
	xhci: dbctty: disable ECHO flag by default
	xhci: dbc: Flush queued requests before stopping dbc
	Logitech C-270 even more broken
	usb: typec: displayport: Fix potential deadlock
	ACPI: PAD: fix crash in exit_round_robin()
	media: uvcvideo: Return the number of processed controls
	media: uvcvideo: Send control events for partial succeeds
	media: uvcvideo: Rollback non processed entities on error
	staging: rtl8723bs: Avoid memset() in aes_cipher() and aes_decipher()
	drm/exynos: exynos7_drm_decon: add vblank check in IRQ handling
	ASoC: fsl_asrc: use internal measured ratio for non-ideal ratio mode
	perf: Revert to requiring CAP_SYS_ADMIN for uprobes
	fix proc_sys_compare() handling of in-lookup dentries
	netlink: Fix wraparounds of sk->sk_rmem_alloc.
	tipc: Fix use-after-free in tipc_conn_close().
	vsock: Fix transport_{g2h,h2g} TOCTOU
	vm_sockets: Add flags field in the vsock address data structure
	vm_sockets: Add VMADDR_FLAG_TO_HOST vsock flag
	af_vsock: Set VMADDR_FLAG_TO_HOST flag on the receive path
	af_vsock: Assign the vsock transport considering the vsock address flags
	vsock: Fix transport_* TOCTOU
	vsock: Fix IOCTL_VM_SOCKETS_GET_LOCAL_CID to check also `transport_local`
	net: phy: smsc: Fix Auto-MDIX configuration when disabled by strap
	net: phy: smsc: Fix link failure in forced mode with Auto-MDIX
	atm: clip: Fix potential null-ptr-deref in to_atmarpd().
	atm: clip: Fix memory leak of struct clip_vcc.
	atm: clip: Fix infinite recursive call of clip_push().
	atm: clip: Fix NULL pointer dereference in vcc_sendmsg()
	net/sched: Abort __tc_modify_qdisc if parent class does not exist
	fs/proc: do_task_stat: use __for_each_thread()
	rxrpc: Fix oops due to non-existence of prealloc backlog struct
	Documentation: x86/bugs/its: Add ITS documentation
	x86/bhi: Define SPEC_CTRL_BHI_DIS_S
	x86/its: Enumerate Indirect Target Selection (ITS) bug
	x86/alternatives: Introduce int3_emulate_jcc()
	x86/alternatives: Teach text_poke_bp() to patch Jcc.d32 instructions
	x86/its: Add support for ITS-safe indirect thunk
	x86/alternative: Optimize returns patching
	x86/alternatives: Remove faulty optimization
	x86/its: Add support for ITS-safe return thunk
	x86/its: Fix undefined reference to cpu_wants_rethunk_at()
	x86/its: Enable Indirect Target Selection mitigation
	x86/its: Add "vmexit" option to skip mitigation on some CPUs
	x86/modules: Set VM_FLUSH_RESET_PERMS in module_alloc()
	x86/its: Use dynamic thunks for indirect branches
	x86/its: Fix build errors when CONFIG_MODULES=n
	x86/its: FineIBT-paranoid vs ITS
	x86/mce/amd: Fix threshold limit reset
	x86/mce: Don't remove sysfs if thresholding sysfs init fails
	x86/mce: Make sure CMCI banks are cleared during shutdown on Intel
	pinctrl: qcom: msm: mark certain pins as invalid for interrupts
	drm/sched: Increment job count before swapping tail spsc queue
	usb: gadget: u_serial: Fix race condition in TTY wakeup
	Revert "ACPI: battery: negate current when discharging"
	ethernet: atl1: Add missing DMA mapping error checks and count errors
	rtc: lib_test: add MODULE_LICENSE
	pwm: mediatek: Ensure to disable clocks in error path
	netlink: Fix rmem check in netlink_broadcast_deliver().
	netlink: make sure we allow at least one dump skb
	Input: xpad - add support for Amazon Game Controller
	Input: xpad - add VID for Turtle Beach controllers
	Input: xpad - support Acer NGR 200 Controller
	dma-buf: fix timeout handling in dma_resv_wait_timeout v2
	wifi: zd1211rw: Fix potential NULL pointer dereference in zd_mac_tx_to_dev()
	md/raid1: Fix stack memory use after return in raid1_reshape
	net: appletalk: Fix device refcount leak in atrtr_create()
	net: phy: microchip: limit 100M workaround to link-down events on LAN88xx
	can: m_can: m_can_handle_lost_msg(): downgrade msg lost in rx message to debug level
	net: ll_temac: Fix missing tx_pending check in ethtools_set_ringparam()
	bnxt_en: Fix DCB ETS validation
	bnxt_en: Set DMA unmap len correctly for XDP_REDIRECT
	atm: idt77252: Add missing `dma_map_error()`
	um: vector: Reduce stack usage in vector_eth_configure()
	net: usb: qmi_wwan: add SIMCom 8230C composition
	vt: add missing notification when switching back to text mode
	HID: Add IGNORE quirk for SMARTLINKTECHNOLOGY
	HID: quirks: Add quirk for 2 Chicony Electronics HP 5MP Cameras
	Input: atkbd - do not skip atkbd_deactivate() when skipping ATKBD_CMD_GETID
	vhost-scsi: protect vq->log_used with vq->mutex
	x86/mm: Disable hugetlb page table sharing on 32-bit
	x86/bugs: Rename MDS machinery to something more generic
	x86/bugs: Add a Transient Scheduler Attacks mitigation
	KVM: x86: add support for CPUID leaf 0x80000021
	KVM: SVM: Advertise TSA CPUID bits to guests
	x86/process: Move the buffer clearing before MONITOR
	rseq: Fix segfault on registration when rseq_cs is non-zero
	Linux 5.10.240

Change-Id: I75c56007632194306321eca64ab10f3d2531da3d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
199 changed files with 2707 additions and 791 deletions


@@ -511,6 +511,7 @@ Description: information about CPUs heterogeneity.
What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
/sys/devices/system/cpu/vulnerabilities/indirect_target_selection
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
/sys/devices/system/cpu/vulnerabilities/l1tf
/sys/devices/system/cpu/vulnerabilities/mds
@@ -522,6 +523,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spectre_v1
/sys/devices/system/cpu/vulnerabilities/spectre_v2
/sys/devices/system/cpu/vulnerabilities/srbds
/sys/devices/system/cpu/vulnerabilities/tsa
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>


@@ -655,7 +655,7 @@ Description: This file shows the thin provisioning type. This is one of
The file is read only.
-What: /sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resourse_count
+What: /sys/class/scsi_device/*/device/unit_descriptor/physical_memory_resource_count
Date: February 2018
Contact: Stanislav Nijnikov <stanislav.nijnikov@wdc.com>
Description: This file shows the total physical memory resources. This is


@@ -19,3 +19,4 @@ are configurable at compile, boot or run time.
gather_data_sampling.rst
srso
reg-file-data-sampling
indirect-target-selection


@@ -0,0 +1,156 @@
.. SPDX-License-Identifier: GPL-2.0
Indirect Target Selection (ITS)
===============================
ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were
released before Alder Lake. ITS may allow an attacker to control the prediction
of indirect branches and RETs located in the lower half of a cacheline.
ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium).
Scope of Impact
---------------
- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be
predicted with unintended target corresponding to a branch in the guest.
- **Intra-Mode BTI**: In-kernel training such as through cBPF or other native
gadgets.
- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect
branches may still be predicted with targets corresponding to direct branches
executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which
should be available via distro updates. Alternatively microcode can be
obtained from Intel's github repository [#f1]_.
Affected CPUs
-------------
Below is the list of ITS affected CPUs [#f2]_ [#f3]_:
======================== ============ ==================== ===============
Common name Family_Model eIBRS Intra-mode BTI
Guest/Host Isolation
======================== ============ ==================== ===============
SKYLAKE_X (step >= 6) 06_55H Affected Affected
ICELAKE_X 06_6AH Not affected Affected
ICELAKE_D 06_6CH Not affected Affected
ICELAKE_L 06_7EH Not affected Affected
TIGERLAKE_L 06_8CH Not affected Affected
TIGERLAKE 06_8DH Not affected Affected
KABYLAKE_L (step >= 12) 06_8EH Affected Affected
KABYLAKE (step >= 13) 06_9EH Affected Affected
COMETLAKE 06_A5H Affected Affected
COMETLAKE_L 06_A6H Affected Affected
ROCKETLAKE 06_A7H Not affected Affected
======================== ============ ==================== ===============
- All affected CPUs enumerate Enhanced IBRS feature.
- IBPB isolation is affected on all ITS-affected CPUs, and needs a microcode
  update for mitigation.
- None of the affected CPUs enumerate BHI_CTRL which was introduced in Golden
Cove (Alder Lake and Sapphire Rapids). This can help guests to determine the
host's affected status.
- Intel Atom CPUs are not affected by ITS.
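
For readers who want to place their own CPU in this table: the Family_Model
column uses the standard CPUID display family/model encoding. A minimal
user-space sketch (an editorial illustration, not part of this patch; assumes
GCC/Clang's <cpuid.h> on x86)::

    #include <cpuid.h>   /* GCC/Clang helper for the CPUID instruction */
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            __get_cpuid(1, &eax, &ebx, &ecx, &edx);

            unsigned int family = (eax >> 8) & 0xf;
            unsigned int model  = (eax >> 4) & 0xf;

            /* Extended model applies for families 0x6 and 0xf,
             * extended family only for family 0xf. */
            if (family == 0x6 || family == 0xf)
                    model |= ((eax >> 16) & 0xf) << 4;
            if (family == 0xf)
                    family += (eax >> 20) & 0xff;

            printf("%02X_%XH (stepping %u)\n", family, model, eax & 0xf);
            return 0;
    }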
Mitigation
----------
As only the indirect branches and RETs that have their last byte of instruction
in the lower half of the cacheline are vulnerable to ITS, the basic idea behind
the mitigation is to not allow indirect branches in the lower half.
This is achieved by relying on existing retpoline support in the kernel, and in
compilers. ITS-vulnerable retpoline sites are runtime patched to point to newly
added ITS-safe thunks. These safe thunks consist of an indirect branch in the
second half of the cacheline. Not all retpoline sites are patched to thunks; if
a retpoline site is evaluated to be ITS-safe, it is replaced with an inline
indirect branch.
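
To make the "lower half" test concrete, here is a minimal sketch of the address
check (an editorial illustration mirroring the cpu_wants_indirect_its_thunk_at()
helper added later in this series, not the kernel code itself)::

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * "jmp *reg" is 2 bytes, or 3 with a REX.B prefix for r8-r15.
     * With 64-byte cachelines, the branch is ITS-unsafe when the
     * address of its last byte has bit 5 clear (lower half).
     */
    static bool its_unsafe_indirect_branch(uint64_t addr, int reg)
    {
            uint64_t last_byte = addr + 1 + reg / 8;

            return !(last_byte & 0x20);
    }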
Dynamic thunks
~~~~~~~~~~~~~~
From a dynamically allocated pool of safe-thunks, each vulnerable site is
replaced with a new thunk, such that they get a unique address. This could
improve the branch prediction accuracy. Also, it is a defense-in-depth measure
against aliasing.
Note, for simplicity, indirect branches in eBPF programs are always replaced
with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
in future this can be changed to use dynamic thunks.
All vulnerable RETs are replaced with a static thunk, they do not use dynamic
thunks. This is because RETs get their prediction from RSB mostly that does not
depend on source address. RETs that underflow RSB may benefit from dynamic
thunks. But, RETs significantly outnumber indirect branches, and any benefit
from a unique source address could be outweighed by the increased icache
footprint and iTLB pressure.
Retpoline
~~~~~~~~~
The retpoline sequence also mitigates ITS-unsafe indirect branches. For this
reason, when retpoline is enabled, the ITS mitigation only relocates the RETs
to safe thunks, unless the user requested the RSB-stuffing mitigation.
Mitigation in guests
^^^^^^^^^^^^^^^^^^^^
All guests deploy ITS mitigation by default, irrespective of eIBRS enumeration
and Family/Model of the guest. This is because eIBRS feature could be hidden
from a guest. One exception to this is when a guest enumerates BHI_DIS_S, which
indicates that the guest is running on an unaffected host.
To prevent guests from unnecessarily deploying the mitigation on unaffected
platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When
a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit
is not set by any hardware, but is **intended for VMMs to synthesize** it for
guests as per the host's affected status.
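
As a concrete illustration of checking this bit from Linux user space (an
editorial sketch, not part of this patch; requires the "msr" module and root,
and uses the ARCH_CAP_ITS_NO bit 62 defined by this series)::

    /* cc -o its_no its_no.c && sudo ./its_no   (needs "modprobe msr") */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
    #define ARCH_CAP_ITS_NO (1ULL << 62)

    int main(void)
    {
            uint64_t val;
            /* The msr driver exposes MSRs at the file offset == MSR index */
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0 || pread(fd, &val, sizeof(val),
                                MSR_IA32_ARCH_CAPABILITIES) != sizeof(val)) {
                    perror("rdmsr");
                    return 1;
            }
            printf("ITS_NO is %s\n", (val & ARCH_CAP_ITS_NO) ? "set" : "clear");
            close(fd);
            return 0;
    }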
Mitigation options
^^^^^^^^^^^^^^^^^^
The ITS mitigation can be controlled using the "indirect_target_selection"
kernel parameter. The available options are:
======== ===================================================================
on (default) Deploy the "Aligned branch/return thunks" mitigation.
If spectre_v2 mitigation enables retpoline, aligned-thunks are only
deployed for the affected RET instructions. Retpoline mitigates
indirect branches.
off Disable ITS mitigation.
vmexit Equivalent to "=on" if the CPU is affected by guest/host isolation
part of ITS. Otherwise, mitigation is not deployed. This option is
useful when host userspace is not in the threat model, and only
attacks from guest to host are considered.
force Force the ITS bug and deploy the default mitigation.
======== ===================================================================
Sysfs reporting
---------------
The sysfs file showing ITS mitigation status is:
/sys/devices/system/cpu/vulnerabilities/indirect_target_selection
Note, microcode mitigation status is not reported in this file.
The possible values in this file are:
.. list-table::
* - Not affected
- The processor is not vulnerable.
* - Vulnerable
- System is vulnerable and no mitigation has been applied.
* - Vulnerable, KVM: Not affected
- System is vulnerable to intra-mode BTI, but not affected by eIBRS
guest/host isolation.
* - Mitigation: Aligned branch/return thunks
- The mitigation is enabled, affected indirect branches and RETs are
relocated to safe thunks.
References
----------
.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files
.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list


@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
combination with a microcode update. The microcode clears the affected CPU
buffers when the VERW instruction is executed.
-Kernel reuses the MDS function to invoke the buffer clearing:
-
-  mds_clear_cpu_buffers()
+Kernel does the buffer clearing with x86_clear_cpu_buffers().
On MDS affected CPUs, the kernel already invokes CPU buffer clear on
kernel/userspace, hypervisor/guest and C-state (idle) transitions. No


@@ -1892,6 +1892,20 @@
different crypto accelerators. This option can be used
to achieve best performance for particular HW.
indirect_target_selection= [X86,Intel] Mitigation control for Indirect
Target Selection(ITS) bug in Intel CPUs. Updated
microcode is also required for a fix in IBPB.
on: Enable mitigation (default).
off: Disable mitigation.
force: Force the ITS bug and deploy default
mitigation.
vmexit: Only deploy mitigation if CPU is affected by
guest/host isolation part of ITS.
For details see:
Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
init= [KNL]
Format: <full_path>
Run specified binary instead of /sbin/init as init
@@ -3000,6 +3014,7 @@
improves system performance, but it may also
expose users to several CPU vulnerabilities.
Equivalent to: gather_data_sampling=off [X86]
indirect_target_selection=off [X86]
kpti=0 [ARM64]
kvm.nx_huge_pages=off [X86]
l1tf=off [X86]
@@ -5707,6 +5722,19 @@
See Documentation/admin-guide/mm/transhuge.rst
for more details.
tsa= [X86] Control mitigation for Transient Scheduler
Attacks on AMD CPUs. Search the following in your
favourite search engine for more details:
"Technical guidance for mitigating transient scheduler
attacks".
off - disable the mitigation
on - enable the mitigation (default)
user - mitigate only user/kernel transitions
vm - mitigate only guest/host transitions
tsc= Disable clocksource stability checks for TSC.
Format: <string>
[x86] reliable: mark tsc clocksource as reliable, this


@@ -39,7 +39,7 @@ allOf:
- ns16550
- ns16550a
then:
-  anyOf:
+  oneOf:
- required: [ clock-frequency ]
- required: [ clocks ]


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
-SUBLEVEL = 239
+SUBLEVEL = 240
EXTRAVERSION =
NAME = Dare mighty things


@@ -1463,7 +1463,8 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
next = addr;
end = addr + PUD_SIZE;
do {
-		pmd_free_pte_page(pmdp, next);
+		if (pmd_present(READ_ONCE(*pmdp)))
+			pmd_free_pte_page(pmdp, next);
} while (pmdp++, next += PMD_SIZE, next != end);
pud_clear(pudp);


@@ -23,10 +23,10 @@
#define TCSETSW _IOW('t', 21, struct termios)
#define TCSETSF _IOW('t', 22, struct termios)
-#define TCGETA	_IOR('t', 23, struct termio)
-#define TCSETA	_IOW('t', 24, struct termio)
-#define TCSETAW	_IOW('t', 25, struct termio)
-#define TCSETAF	_IOW('t', 28, struct termio)
+#define TCGETA	0x40147417 /* _IOR('t', 23, struct termio) */
+#define TCSETA	0x80147418 /* _IOW('t', 24, struct termio) */
+#define TCSETAW	0x80147419 /* _IOW('t', 25, struct termio) */
+#define TCSETAF	0x8014741c /* _IOW('t', 28, struct termio) */
#define TCSBRK _IO('t', 29)
#define TCXONC _IO('t', 30)


@@ -23,7 +23,7 @@ endif
aflags_dwarf := -Wa,-gdwarf-2
KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
-KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2
+KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -std=gnu11
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables


@@ -20,7 +20,7 @@ GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
+KBUILD_CFLAGS := -std=gnu11 -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common


@@ -41,7 +41,7 @@ int start_io_thread(unsigned long sp, int *fd_out)
*fd_out = fds[1];
err = os_set_fd_block(*fd_out, 0);
-	err = os_set_fd_block(kernel_fd, 0);
+	err |= os_set_fd_block(kernel_fd, 0);
if (err) {
printk("start_io_thread - failed to set nonblocking I/O.\n");
goto out_close;


@@ -1600,35 +1600,19 @@ static void vector_eth_configure(
device->dev = dev;
-	*vp = ((struct vector_private)
-		{
-		.list			= LIST_HEAD_INIT(vp->list),
-		.dev			= dev,
-		.unit			= n,
-		.options		= get_transport_options(def),
-		.rx_irq			= 0,
-		.tx_irq			= 0,
-		.parsed			= def,
-		.max_packet		= get_mtu(def) + ETH_HEADER_OTHER,
-		/* TODO - we need to calculate headroom so that ip header
-		 * is 16 byte aligned all the time
-		 */
-		.headroom		= get_headroom(def),
-		.form_header		= NULL,
-		.verify_header		= NULL,
-		.header_rxbuffer	= NULL,
-		.header_txbuffer	= NULL,
-		.header_size		= 0,
-		.rx_header_size		= 0,
-		.rexmit_scheduled	= false,
-		.opened			= false,
-		.transport_data		= NULL,
-		.in_write_poll		= false,
-		.coalesce		= 2,
-		.req_size		= get_req_size(def),
-		.in_error		= false,
-		.bpf			= NULL
-	});
+	INIT_LIST_HEAD(&vp->list);
+	vp->dev = dev;
+	vp->unit = n;
+	vp->options = get_transport_options(def);
+	vp->parsed = def;
+	vp->max_packet = get_mtu(def) + ETH_HEADER_OTHER;
+	/*
+	 * TODO - we need to calculate headroom so that ip header
+	 * is 16 byte aligned all the time
+	 */
+	vp->headroom = get_headroom(def);
+	vp->coalesce = 2;
+	vp->req_size = get_req_size(def);
dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
tasklet_init(&vp->tx_poll, vector_tx_poll, (unsigned long)vp);


@@ -1 +1,6 @@
#include <asm-generic/asm-prototypes.h>
#include <asm/checksum.h>
#ifdef CONFIG_UML_X86
extern void cmpxchg8b_emu(void);
#endif


@@ -105,7 +105,7 @@ config X86
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
-	select ARCH_WANT_HUGE_PMD_SHARE
+	select ARCH_WANT_HUGE_PMD_SHARE if X86_64
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_THP_SWAP if X86_64
select BUILDTIME_TABLE_SORT
@@ -2528,6 +2528,26 @@ config MITIGATION_RFDS
stored in floating point, vector and integer registers.
See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
config MITIGATION_ITS
bool "Enable Indirect Target Selection mitigation"
depends on CPU_SUP_INTEL && X86_64
depends on RETPOLINE && RETHUNK
default y
help
Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in
BPU on some Intel CPUs that may allow Spectre V2 style attacks. If
disabled, mitigation cannot be enabled via cmdline.
See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
config MITIGATION_TSA
bool "Mitigate Transient Scheduler Attacks"
depends on CPU_SUP_AMD
default y
help
Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
security vulnerability on AMD CPUs which can lead to forwarding of
invalid info to subsequent instructions and thus can affect their
timing and thereby cause a leakage.
endif
config ARCH_HAS_ADD_PAGES


@@ -31,20 +31,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb);
/*
* Define the VERW operand that is disguised as entry code so that
- * it can be referenced with KPTI enabled. This ensure VERW can be
+ * it can be referenced with KPTI enabled. This ensures VERW can be
* used late in exit-to-user path after page tables are switched.
*/
.pushsection .entry.text, "ax"
.align L1_CACHE_BYTES, 0xcc
-SYM_CODE_START_NOALIGN(mds_verw_sel)
+SYM_CODE_START_NOALIGN(x86_verw_sel)
UNWIND_HINT_EMPTY
ANNOTATE_NOENDBR
.word __KERNEL_DS
.align L1_CACHE_BYTES, 0xcc
-SYM_CODE_END(mds_verw_sel);
+SYM_CODE_END(x86_verw_sel);
/* For KVM */
-EXPORT_SYMBOL_GPL(mds_verw_sel);
+EXPORT_SYMBOL_GPL(x86_verw_sel);
.popsection


@@ -80,6 +80,32 @@ extern void apply_returns(s32 *start, s32 *end);
struct module;
extern u8 *its_static_thunk(int reg);
#ifdef CONFIG_MITIGATION_ITS
extern void its_init_mod(struct module *mod);
extern void its_fini_mod(struct module *mod);
extern void its_free_mod(struct module *mod);
#else /* CONFIG_MITIGATION_ITS */
static inline void its_init_mod(struct module *mod) { }
static inline void its_fini_mod(struct module *mod) { }
static inline void its_free_mod(struct module *mod) { }
#endif
#if defined(CONFIG_RETHUNK) && defined(CONFIG_STACK_VALIDATION)
extern bool cpu_wants_rethunk(void);
extern bool cpu_wants_rethunk_at(void *addr);
#else
static __always_inline bool cpu_wants_rethunk(void)
{
return false;
}
static __always_inline bool cpu_wants_rethunk_at(void *addr)
{
return false;
}
#endif
#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,


@@ -63,4 +63,17 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
#else
static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
#endif
union zen_patch_rev {
struct {
__u32 rev : 8,
stepping : 4,
model : 4,
__reserved : 4,
ext_model : 4,
ext_fam : 8;
};
__u32 ucode_rev;
};
#endif /* _ASM_X86_CPU_H */


@@ -34,6 +34,7 @@ enum cpuid_leafs
CPUID_8000_001F_EAX,
CPUID_8000_0021_EAX,
CPUID_LNX_5,
CPUID_8000_0021_ECX,
NR_CPUID_WORDS,
};
@@ -97,7 +98,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \
REQUIRED_MASK_CHECK || \
-	   BUILD_BUG_ON_ZERO(NCAPINTS != 22))
+	   BUILD_BUG_ON_ZERO(NCAPINTS != 23))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -123,7 +124,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \
DISABLED_MASK_CHECK || \
-	   BUILD_BUG_ON_ZERO(NCAPINTS != 22))
+	   BUILD_BUG_ON_ZERO(NCAPINTS != 23))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \


@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS	22	/* N 32-bit words worth of info */
+#define NCAPINTS	23	/* N 32-bit words worth of info */
#define NBUGINTS 2 /* N 32-bit bug flags */
/*
@@ -289,8 +289,8 @@
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
-/* FREE!				(11*32+ 8) */
-/* FREE!				(11*32+ 9) */
+#define X86_FEATURE_BHI_CTRL		(11*32+ 8) /* "" BHI_DIS_S HW control available */
+#define X86_FEATURE_INDIRECT_THUNK_ITS	(11*32+ 9) /* "" Use thunk for indirect branches in lower half of cacheline */
#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
@@ -406,11 +406,16 @@
#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* "" The memory form of VERW mitigates TSA */
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
#define X86_FEATURE_TSA_SQ_NO (22*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
#define X86_FEATURE_TSA_L1_NO (22*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
#define X86_FEATURE_CLEAR_CPU_BUF_VM (22*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
/*
* BUG word(s)
*/
@@ -459,4 +464,7 @@
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
#define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */
#define X86_BUG_TSA X86_BUG(1*32 + 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
#endif /* _ASM_X86_CPUFEATURES_H */


@@ -104,6 +104,6 @@
#define DISABLED_MASK19 0
#define DISABLED_MASK20 0
#define DISABLED_MASK21 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23)
#endif /* _ASM_X86_DISABLED_FEATURES_H */


@@ -56,13 +56,13 @@ static __always_inline void native_irq_enable(void)
static inline __cpuidle void native_safe_halt(void)
{
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory");
}
static inline __cpuidle void native_halt(void)
{
-	mds_idle_clear_cpu_buffers();
+	x86_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory");
}


@@ -55,10 +55,13 @@
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
#define SPEC_CTRL_BHI_DIS_S_SHIFT 10 /* Disable Branch History Injection behavior */
#define SPEC_CTRL_BHI_DIS_S BIT(SPEC_CTRL_BHI_DIS_S_SHIFT)
/* A mask for bits which the kernel toggles when controlling mitigations */
#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
-	 | SPEC_CTRL_RRSBA_DIS_S)
+	 | SPEC_CTRL_RRSBA_DIS_S \
+	 | SPEC_CTRL_BHI_DIS_S)
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
@@ -176,6 +179,14 @@
* VERW clears CPU Register
* File.
*/
#define ARCH_CAP_ITS_NO BIT_ULL(62) /*
* Not susceptible to
* Indirect Target Selection.
* This bit is not set by
* HW, but is synthesized by
* VMMs for guests to know
* their affected status.
*/
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*


@@ -43,8 +43,6 @@ static inline void __monitorx(const void *eax, unsigned long ecx,
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
-	mds_idle_clear_cpu_buffers();
/* "mwait %eax, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
@@ -79,7 +77,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
static inline void __mwaitx(unsigned long eax, unsigned long ebx,
unsigned long ecx)
{
-	/* No MDS buffer clear as this is AMD/HYGON only */
+	/* No need for TSA buffer clearing on AMD */
/* "mwaitx %eax, %ebx, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xfb;"
@@ -88,7 +86,7 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
-	mds_idle_clear_cpu_buffers();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
@@ -106,6 +104,11 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
*/
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
if (need_resched())
return;
x86_idle_clear_cpu_buffers();
if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
mb();
@@ -114,9 +117,13 @@ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
}
__monitor((void *)&current_thread_info()->flags, 0, 0);
-		if (!need_resched())
-			__mwait(eax, ecx);
+		if (need_resched())
+			goto out;
+
+		__mwait(eax, ecx);
 	}
+out:
current_clr_polling();
}


@@ -191,27 +191,33 @@
.endm
/*
- * Macro to execute VERW instruction that mitigate transient data sampling
- * attacks such as MDS. On affected systems a microcode update overloaded VERW
- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
- *
+ * Macro to execute VERW insns that mitigate transient data sampling
+ * attacks such as MDS or TSA. On affected systems a microcode update
+ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
+ * CFLAGS.ZF.
 * Note: Only the memory operand variant of VERW clears the CPU buffers.
 */
-.macro CLEAR_CPU_BUFFERS
-	ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
+.macro __CLEAR_CPU_BUFFERS feature
+	ALTERNATIVE "jmp .Lskip_verw_\@", "", \feature
#ifdef CONFIG_X86_64
-	verw mds_verw_sel(%rip)
+	verw x86_verw_sel(%rip)
#else
	/*
	 * In 32bit mode, the memory operand must be a %cs reference. The data
	 * segments may not be usable (vm86 mode), and the stack segment may not
	 * be flat (ESPFIX32).
	 */
-	verw %cs:mds_verw_sel
+	verw %cs:x86_verw_sel
#endif
.Lskip_verw_\@:
.endm
+
+#define CLEAR_CPU_BUFFERS \
+	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
+
+#define VM_CLEAR_CPU_BUFFERS \
+	__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
#else /* __ASSEMBLY__ */
#define ANNOTATE_RETPOLINE_SAFE \
@@ -226,6 +232,12 @@ extern void __x86_return_thunk(void);
static inline void __x86_return_thunk(void) {}
#endif
#ifdef CONFIG_MITIGATION_ITS
extern void its_return_thunk(void);
#else
static inline void its_return_thunk(void) {}
#endif
extern void retbleed_return_thunk(void);
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
@@ -243,6 +255,11 @@ extern void (*x86_return_thunk)(void);
typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
#define ITS_THUNK_SIZE 64
typedef u8 its_thunk_t[ITS_THUNK_SIZE];
extern its_thunk_t __x86_indirect_its_thunk_array[];
#define GEN(reg) \
extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
@@ -387,22 +404,22 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
-extern u16 mds_verw_sel;
+extern u16 x86_verw_sel;
#include <asm/segment.h>
/**
- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
+ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
*
* This uses the otherwise unused and obsolete VERW instruction in
* combination with microcode which triggers a CPU buffer flush when the
* instruction is executed.
*/
-static __always_inline void mds_clear_cpu_buffers(void)
+static __always_inline void x86_clear_cpu_buffers(void)
{
static const u16 ds = __KERNEL_DS;
@@ -419,14 +436,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
}
/**
- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
+ * and TSA vulnerabilities.
*
* Clear CPU buffers if the corresponding static key is enabled
*/
-static inline void mds_idle_clear_cpu_buffers(void)
+static __always_inline void x86_idle_clear_cpu_buffers(void)
{
-	if (static_branch_likely(&mds_idle_clear))
-		mds_clear_cpu_buffers();
+	if (static_branch_likely(&cpu_buf_idle_clear))
+		x86_clear_cpu_buffers();
}
#endif /* __ASSEMBLY__ */


@@ -104,6 +104,6 @@
#define REQUIRED_MASK19 0
#define REQUIRED_MASK20 0
#define REQUIRED_MASK21 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */


@@ -181,6 +181,37 @@ void int3_emulate_ret(struct pt_regs *regs)
unsigned long ip = int3_emulate_pop(regs);
int3_emulate_jmp(regs, ip);
}
static __always_inline
void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp)
{
static const unsigned long jcc_mask[6] = {
[0] = X86_EFLAGS_OF,
[1] = X86_EFLAGS_CF,
[2] = X86_EFLAGS_ZF,
[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
[4] = X86_EFLAGS_SF,
[5] = X86_EFLAGS_PF,
};
bool invert = cc & 1;
bool match;
if (cc < 0xc) {
match = regs->flags & jcc_mask[cc >> 1];
} else {
match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
if (cc >= 0xe)
match = match || (regs->flags & X86_EFLAGS_ZF);
}
if ((match && !invert) || (!match && invert))
ip += disp;
int3_emulate_jmp(regs, ip);
}
#endif /* !CONFIG_UML_X86 */
#endif /* _ASM_X86_TEXT_PATCHING_H */


@@ -18,6 +18,7 @@
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <linux/moduleloader.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
@@ -29,6 +30,7 @@
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/asm-prototypes.h>
#include <asm/set_memory.h>
int __read_mostly alternatives_patched;
@@ -506,6 +508,12 @@ next:
kasan_enable_current();
}
static inline bool is_jcc32(struct insn *insn)
{
/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
}
#if defined(CONFIG_RETPOLINE) && defined(CONFIG_STACK_VALIDATION)
/*
@@ -544,6 +552,225 @@ static int emit_indirect(int op, int reg, u8 *bytes)
return i;
}
#ifdef CONFIG_MITIGATION_ITS
#ifdef CONFIG_MODULES
static struct module *its_mod;
static void *its_page;
static unsigned int its_offset;
/* Initialize a thunk with the "jmp *reg; int3" instructions. */
static void *its_init_thunk(void *thunk, int reg)
{
u8 *bytes = thunk;
int i = 0;
if (reg >= 8) {
bytes[i++] = 0x41; /* REX.B prefix */
reg -= 8;
}
bytes[i++] = 0xff;
bytes[i++] = 0xe0 + reg; /* jmp *reg */
bytes[i++] = 0xcc;
return thunk;
}
void its_init_mod(struct module *mod)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return;
mutex_lock(&text_mutex);
its_mod = mod;
its_page = NULL;
}
void its_fini_mod(struct module *mod)
{
int i;
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return;
WARN_ON_ONCE(its_mod != mod);
its_mod = NULL;
its_page = NULL;
mutex_unlock(&text_mutex);
for (i = 0; i < mod->its_num_pages; i++) {
void *page = mod->its_page_array[i];
set_memory_ro((unsigned long)page, 1);
set_memory_x((unsigned long)page, 1);
}
}
void its_free_mod(struct module *mod)
{
int i;
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return;
for (i = 0; i < mod->its_num_pages; i++) {
void *page = mod->its_page_array[i];
module_memfree(page);
}
kfree(mod->its_page_array);
}
static void *its_alloc(void)
{
void *page = module_alloc(PAGE_SIZE);
if (!page)
return NULL;
if (its_mod) {
void *tmp = krealloc(its_mod->its_page_array,
(its_mod->its_num_pages+1) * sizeof(void *),
GFP_KERNEL);
if (!tmp) {
module_memfree(page);
return NULL;
}
its_mod->its_page_array = tmp;
its_mod->its_page_array[its_mod->its_num_pages++] = page;
}
return page;
}
static void *its_allocate_thunk(int reg)
{
int size = 3 + (reg / 8);
void *thunk;
if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) {
its_page = its_alloc();
if (!its_page) {
pr_err("ITS page allocation failed\n");
return NULL;
}
memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE);
its_offset = 32;
}
/*
* If the indirect branch instruction will be in the lower half
* of a cacheline, then update the offset to reach the upper half.
*/
if ((its_offset + size - 1) % 64 < 32)
its_offset = ((its_offset - 1) | 0x3F) + 33;
thunk = its_page + its_offset;
its_offset += size;
set_memory_rw((unsigned long)its_page, 1);
thunk = its_init_thunk(thunk, reg);
set_memory_ro((unsigned long)its_page, 1);
set_memory_x((unsigned long)its_page, 1);
return thunk;
}
#else /* CONFIG_MODULES */
static void *its_allocate_thunk(int reg)
{
return NULL;
}
#endif /* CONFIG_MODULES */
static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes,
void *call_dest, void *jmp_dest)
{
u8 op = insn->opcode.bytes[0];
int i = 0;
/*
* Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional
* tail-calls. Deal with them.
*/
if (is_jcc32(insn)) {
bytes[i++] = op;
op = insn->opcode.bytes[1];
goto clang_jcc;
}
if (insn->length == 6)
bytes[i++] = 0x2e; /* CS-prefix */
switch (op) {
case CALL_INSN_OPCODE:
__text_gen_insn(bytes+i, op, addr+i,
call_dest,
CALL_INSN_SIZE);
i += CALL_INSN_SIZE;
break;
case JMP32_INSN_OPCODE:
clang_jcc:
__text_gen_insn(bytes+i, op, addr+i,
jmp_dest,
JMP32_INSN_SIZE);
i += JMP32_INSN_SIZE;
break;
default:
WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr);
return -1;
}
WARN_ON_ONCE(i != insn->length);
return i;
}
static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
u8 *thunk = __x86_indirect_its_thunk_array[reg];
u8 *tmp = its_allocate_thunk(reg);
if (tmp)
thunk = tmp;
return __emit_trampoline(addr, insn, bytes, thunk, thunk);
}
/* Check if an indirect branch is at ITS-unsafe address */
static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return false;
/* Indirect branch opcode is 2 or 3 bytes depending on reg */
addr += 1 + reg / 8;
/* Lower-half of the cacheline? */
return !(addr & 0x20);
}
u8 *its_static_thunk(int reg)
{
u8 *thunk = __x86_indirect_its_thunk_array[reg];
return thunk;
}
#else /* CONFIG_MITIGATION_ITS */
u8 *its_static_thunk(int reg)
{
WARN_ONCE(1, "ITS not compiled in");
return NULL;
}
#endif /* CONFIG_MITIGATION_ITS */
/*
* Rewrite the compiler generated retpoline thunk calls.
*
@@ -615,6 +842,15 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
bytes[i++] = 0xe8; /* LFENCE */
}
#ifdef CONFIG_MITIGATION_ITS
/*
* Check if the address of last byte of emitted-indirect is in
* lower-half of the cacheline. Such branches need ITS mitigation.
*/
if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg))
return emit_its_trampoline(addr, insn, reg, bytes);
#endif
ret = emit_indirect(op, reg, bytes + i);
if (ret < 0)
return ret;
@@ -677,6 +913,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
#ifdef CONFIG_RETHUNK
bool cpu_wants_rethunk(void)
{
return cpu_feature_enabled(X86_FEATURE_RETHUNK);
}
bool cpu_wants_rethunk_at(void *addr)
{
if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
return false;
if (x86_return_thunk != its_return_thunk)
return true;
return !((unsigned long)addr & 0x20);
}
/*
* Rewrite the compiler generated return thunk tail-calls.
*
@@ -692,13 +943,12 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
{
int i = 0;
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
-		if (x86_return_thunk == __x86_return_thunk)
-			return -1;
-
+	/* Patch the custom return thunks... */
+	if (cpu_wants_rethunk_at(addr)) {
 		i = JMP32_INSN_SIZE;
 		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
 	} else {
+		/* ... or patch them out if not needed. */
bytes[i++] = RET_INSN_OPCODE;
}
@@ -1318,6 +1568,11 @@ void text_poke_sync(void)
on_each_cpu(do_sync_core, NULL, 1);
}
/*
* NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
* this thing. When len == 6 everything is prefixed with 0x0f and we map
* opcode to Jcc.d8, using len to distinguish.
*/
struct text_poke_loc {
/* addr := _stext + rel_addr */
s32 rel_addr;
@@ -1439,6 +1694,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
int3_emulate_jmp(regs, (long)ip + tp->disp);
break;
case 0x70 ... 0x7f: /* Jcc */
int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
break;
default:
BUG();
}
@@ -1512,16 +1771,26 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* Second step: update all but the first byte of the patched range.
*/
for (do_sync = 0, i = 0; i < nr_entries; i++) {
-		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
+		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
+		u8 _new[POKE_MAX_OPCODE_SIZE+1];
+		const u8 *new = tp[i].text;
int len = tp[i].len;
if (len - INT3_INSN_SIZE > 0) {
memcpy(old + INT3_INSN_SIZE,
text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
len - INT3_INSN_SIZE);
+			if (len == 6) {
+				_new[0] = 0x0f;
+				memcpy(_new + 1, new, 5);
+				new = _new;
+			}
+
 			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
-				  (const char *)tp[i].text + INT3_INSN_SIZE,
+				  new + INT3_INSN_SIZE,
len - INT3_INSN_SIZE);
do_sync++;
}
@@ -1549,8 +1818,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* The old instruction is recorded so that the event can be
* processed forwards or backwards.
*/
-		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
-				     tp[i].text, len);
+		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
}
if (do_sync) {
@@ -1567,10 +1835,15 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
* replacing opcode.
*/
for (do_sync = 0, i = 0; i < nr_entries; i++) {
-		if (tp[i].text[0] == INT3_INSN_OPCODE)
+		u8 byte = tp[i].text[0];
+
+		if (tp[i].len == 6)
+			byte = 0x0f;
+
+		if (byte == INT3_INSN_OPCODE)
 			continue;
-		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
+		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
do_sync++;
}
@@ -1588,9 +1861,11 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
const void *opcode, size_t len, const void *emulate)
{
struct insn insn;
-	int ret, i;
+	int ret, i = 0;
-	memcpy((void *)tp->text, opcode, len);
+	if (len == 6)
+		i = 1;
+	memcpy((void *)tp->text, opcode+i, len-i);
if (!emulate)
emulate = opcode;
@@ -1601,6 +1876,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
tp->len = len;
tp->opcode = insn.opcode.bytes[0];
if (is_jcc32(&insn)) {
/*
* Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
*/
tp->opcode = insn.opcode.bytes[1] - 0x10;
}
switch (tp->opcode) {
case RET_INSN_OPCODE:
case JMP32_INSN_OPCODE:
@@ -1617,7 +1899,6 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
BUG_ON(len != insn.length);
};
switch (tp->opcode) {
case INT3_INSN_OPCODE:
case RET_INSN_OPCODE:
@@ -1626,6 +1907,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
case CALL_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
case 0x70 ... 0x7f: /* Jcc */
tp->disp = insn.immediate.value;
break;


@@ -589,6 +589,62 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c)
#endif
}
static bool amd_check_tsa_microcode(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
union zen_patch_rev p;
u32 min_rev = 0;
p.ext_fam = c->x86 - 0xf;
p.model = c->x86_model;
p.ext_model = c->x86_model >> 4;
p.stepping = c->x86_stepping;
if (c->x86 == 0x19) {
switch (p.ucode_rev >> 8) {
case 0xa0011: min_rev = 0x0a0011d7; break;
case 0xa0012: min_rev = 0x0a00123b; break;
case 0xa0082: min_rev = 0x0a00820d; break;
case 0xa1011: min_rev = 0x0a10114c; break;
case 0xa1012: min_rev = 0x0a10124c; break;
case 0xa1081: min_rev = 0x0a108109; break;
case 0xa2010: min_rev = 0x0a20102e; break;
case 0xa2012: min_rev = 0x0a201211; break;
case 0xa4041: min_rev = 0x0a404108; break;
case 0xa5000: min_rev = 0x0a500012; break;
case 0xa6012: min_rev = 0x0a60120a; break;
case 0xa7041: min_rev = 0x0a704108; break;
case 0xa7052: min_rev = 0x0a705208; break;
case 0xa7080: min_rev = 0x0a708008; break;
case 0xa70c0: min_rev = 0x0a70c008; break;
case 0xaa002: min_rev = 0x0aa00216; break;
default:
pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n",
__func__, p.ucode_rev, c->microcode);
return false;
}
}
if (!min_rev)
return false;
return c->microcode >= min_rev;
}
static void tsa_init(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return;
if (c->x86 == 0x19) {
if (amd_check_tsa_microcode())
setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
} else {
setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
}
}
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
@@ -676,6 +732,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
}
resctrl_cpu_detect(c);
tsa_init(c);
}
static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)


@@ -47,6 +47,8 @@ static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init its_select_mitigation(void);
static void __init tsa_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
@@ -63,6 +65,14 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
static void __init set_return_thunk(void *thunk)
{
if (x86_return_thunk != __x86_return_thunk)
pr_warn("x86/bugs: return thunk changed\n");
x86_return_thunk = thunk;
}
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
@@ -109,9 +119,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
-/* Control MDS CPU buffer clear before idling (halt, mwait) */
-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
-EXPORT_SYMBOL_GPL(mds_idle_clear);
+/* Control CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
@@ -161,6 +171,8 @@ void __init cpu_select_mitigations(void)
*/
srso_select_mitigation();
gds_select_mitigation();
its_select_mitigation();
tsa_select_mitigation();
}
/*
@@ -435,7 +447,7 @@ static void __init mmio_select_mitigation(void)
* is required irrespective of SMT state.
*/
if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
-		static_branch_enable(&mds_idle_clear);
+		static_branch_enable(&cpu_buf_idle_clear);
/*
* Check if the system has the right microcode.
@@ -1050,7 +1062,7 @@ do_cmd_auto:
setup_force_cpu_cap(X86_FEATURE_UNRET);
if (IS_ENABLED(CONFIG_RETHUNK))
-		x86_return_thunk = retbleed_return_thunk;
+		set_return_thunk(retbleed_return_thunk);
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -1111,6 +1123,116 @@ do_cmd_auto:
pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
#undef pr_fmt
#define pr_fmt(fmt) "ITS: " fmt
enum its_mitigation_cmd {
ITS_CMD_OFF,
ITS_CMD_ON,
ITS_CMD_VMEXIT,
};
enum its_mitigation {
ITS_MITIGATION_OFF,
ITS_MITIGATION_VMEXIT_ONLY,
ITS_MITIGATION_ALIGNED_THUNKS,
};
static const char * const its_strings[] = {
[ITS_MITIGATION_OFF] = "Vulnerable",
[ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
[ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
};
static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
static enum its_mitigation_cmd its_cmd __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
static int __init its_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;
if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
return 0;
}
if (!strcmp(str, "off")) {
its_cmd = ITS_CMD_OFF;
} else if (!strcmp(str, "on")) {
its_cmd = ITS_CMD_ON;
} else if (!strcmp(str, "force")) {
its_cmd = ITS_CMD_ON;
setup_force_cpu_bug(X86_BUG_ITS);
} else if (!strcmp(str, "vmexit")) {
its_cmd = ITS_CMD_VMEXIT;
} else {
pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
}
return 0;
}
early_param("indirect_target_selection", its_parse_cmdline);
static void __init its_select_mitigation(void)
{
enum its_mitigation_cmd cmd = its_cmd;
if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
its_mitigation = ITS_MITIGATION_OFF;
return;
}
/* Exit early to avoid irrelevant warnings */
if (cmd == ITS_CMD_OFF) {
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (spectre_v2_enabled == SPECTRE_V2_NONE) {
pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (!IS_ENABLED(CONFIG_RETPOLINE) || !IS_ENABLED(CONFIG_RETHUNK)) {
pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
switch (cmd) {
case ITS_CMD_OFF:
its_mitigation = ITS_MITIGATION_OFF;
break;
case ITS_CMD_VMEXIT:
if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
goto out;
}
fallthrough;
case ITS_CMD_ON:
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
set_return_thunk(its_return_thunk);
break;
}
out:
pr_info("%s\n", its_strings[its_mitigation]);
}
#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
@@ -1802,10 +1924,10 @@ static void update_mds_branch_idle(void)
return;
if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear);
static_branch_enable(&cpu_buf_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
(ia32_cap & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear);
static_branch_disable(&cpu_buf_idle_clear);
}
}
@@ -1813,6 +1935,94 @@ static void update_mds_branch_idle(void)
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
#undef pr_fmt
#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
enum tsa_mitigations {
TSA_MITIGATION_NONE,
TSA_MITIGATION_UCODE_NEEDED,
TSA_MITIGATION_USER_KERNEL,
TSA_MITIGATION_VM,
TSA_MITIGATION_FULL,
};
static const char * const tsa_strings[] = {
[TSA_MITIGATION_NONE] = "Vulnerable",
[TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
[TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
[TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
[TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
};
static enum tsa_mitigations tsa_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE;
static int __init tsa_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "off"))
tsa_mitigation = TSA_MITIGATION_NONE;
else if (!strcmp(str, "on"))
tsa_mitigation = TSA_MITIGATION_FULL;
else if (!strcmp(str, "user"))
tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
else if (!strcmp(str, "vm"))
tsa_mitigation = TSA_MITIGATION_VM;
else
pr_err("Ignoring unknown tsa=%s option.\n", str);
return 0;
}
early_param("tsa", tsa_parse_cmdline);
static void __init tsa_select_mitigation(void)
{
if (tsa_mitigation == TSA_MITIGATION_NONE)
return;
if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
tsa_mitigation = TSA_MITIGATION_NONE;
return;
}
if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
switch (tsa_mitigation) {
case TSA_MITIGATION_USER_KERNEL:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
break;
case TSA_MITIGATION_VM:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
break;
case TSA_MITIGATION_UCODE_NEEDED:
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
goto out;
pr_notice("Forcing mitigation on in a VM\n");
/*
* On the off-chance that microcode has been updated
* on the host, enable the mitigation in the guest just
* in case.
*/
fallthrough;
case TSA_MITIGATION_FULL:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
break;
default:
break;
}
out:
pr_info("%s\n", tsa_strings[tsa_mitigation]);
}
void cpu_bugs_smt_update(void)
{
mutex_lock(&spec_ctrl_mutex);
@@ -1866,6 +2076,24 @@ void cpu_bugs_smt_update(void)
break;
}
switch (tsa_mitigation) {
case TSA_MITIGATION_USER_KERNEL:
case TSA_MITIGATION_VM:
case TSA_MITIGATION_FULL:
case TSA_MITIGATION_UCODE_NEEDED:
/*
* TSA-SQ can potentially lead to info leakage between
* SMT threads.
*/
if (sched_smt_active())
static_branch_enable(&cpu_buf_idle_clear);
else
static_branch_disable(&cpu_buf_idle_clear);
break;
case TSA_MITIGATION_NONE:
break;
}
mutex_unlock(&spec_ctrl_mutex);
}
@@ -2453,10 +2681,10 @@ static void __init srso_select_mitigation(void)
if (boot_cpu_data.x86 == 0x19) {
setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
x86_return_thunk = srso_alias_return_thunk;
set_return_thunk(srso_alias_return_thunk);
} else {
setup_force_cpu_cap(X86_FEATURE_SRSO);
x86_return_thunk = srso_return_thunk;
set_return_thunk(srso_return_thunk);
}
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
} else {
@@ -2636,6 +2864,11 @@ static ssize_t rfds_show_state(char *buf)
return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
}
static ssize_t its_show_state(char *buf)
{
return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
}
static char *stibp_state(void)
{
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
@@ -2742,6 +2975,11 @@ static ssize_t srso_show_state(char *buf)
boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
}
static ssize_t tsa_show_state(char *buf)
{
return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -2800,6 +3038,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_RFDS:
return rfds_show_state(buf);
case X86_BUG_ITS:
return its_show_state(buf);
case X86_BUG_TSA:
return tsa_show_state(buf);
default:
break;
}
@@ -2879,4 +3123,14 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib
{
return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}
ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
}
ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}
#endif


@@ -1135,6 +1135,12 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define GDS BIT(6)
/* CPU is affected by Register File Data Sampling */
#define RFDS BIT(7)
/* CPU is affected by Indirect Target Selection */
#define ITS BIT(8)
/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
#define ITS_NATIVE_ONLY BIT(9)
/* CPU is affected by Transient Scheduler Attacks */
#define TSA BIT(10)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
@@ -1146,22 +1152,25 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0x5), MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xb), MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS),
VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xc), MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS),
VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS),
VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS),
VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED | ITS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS),
@@ -1179,7 +1188,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_AMD(0x16, RETBLEED),
VULNBL_AMD(0x17, RETBLEED | SRSO),
VULNBL_HYGON(0x18, RETBLEED | SRSO),
VULNBL_AMD(0x19, SRSO),
VULNBL_AMD(0x19, SRSO | TSA),
{}
};
@@ -1225,6 +1234,32 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
return cpu_matches(cpu_vuln_blacklist, RFDS);
}
static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
{
/* The "immunity" bit trumps everything else: */
if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
return false;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return false;
/* None of the affected CPUs have BHI_CTRL */
if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
return false;
/*
* If a VMM did not expose ITS_NO, assume that a guest could
* be running on vulnerable hardware or may migrate to such
* hardware.
*/
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return true;
if (cpu_matches(cpu_vuln_blacklist, ITS))
return true;
return false;
}
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = x86_read_arch_cap_msr();
@@ -1339,6 +1374,22 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
if (vulnerable_to_its(ia32_cap)) {
setup_force_cpu_bug(X86_BUG_ITS);
if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
}
if (c->x86_vendor == X86_VENDOR_AMD) {
if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
!cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
if (cpu_matches(cpu_vuln_blacklist, TSA) ||
/* Enable bug on Zen guests to allow for live migration. */
(cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
setup_force_cpu_bug(X86_BUG_TSA);
}
}
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;


@@ -297,7 +297,6 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
struct thresh_restart {
struct threshold_block *b;
int reset;
int set_lvt_off;
int lvt_off;
u16 old_limit;
@@ -392,13 +391,13 @@ static void threshold_restart_bank(void *_tr)
rdmsr(tr->b->address, lo, hi);
if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
tr->reset = 1; /* limit cannot be lower than err count */
if (tr->reset) { /* reset err count and overflow bit */
hi =
(hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
(THRESHOLD_MAX - tr->b->threshold_limit);
/*
* Reset error count and overflow bit.
* This is done during init or after handling an interrupt.
*/
if (hi & MASK_OVERFLOW_HI || tr->set_lvt_off) {
hi &= ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI);
hi |= THRESHOLD_MAX - tr->b->threshold_limit;
} else if (tr->old_limit) { /* change limit w/o reset */
int new_count = (hi & THRESHOLD_MAX) +
(tr->old_limit - tr->b->threshold_limit);


@@ -2627,15 +2627,9 @@ static int mce_cpu_dead(unsigned int cpu)
static int mce_cpu_online(unsigned int cpu)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
int ret;
mce_device_create(cpu);
ret = mce_threshold_create_device(cpu);
if (ret) {
mce_device_remove(cpu);
return ret;
}
mce_threshold_create_device(cpu);
mce_reenable_cpu();
mce_start_timer(t);
return 0;


@@ -522,6 +522,7 @@ void mce_intel_feature_init(struct cpuinfo_x86 *c)
void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{
intel_clear_lmce();
cmci_clear();
}
bool intel_filter_mce(struct mce *m)


@@ -27,6 +27,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
{ X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 },
{ X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
{ X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
{ X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },


@@ -367,7 +367,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
goto fail;
ip = trampoline + size;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
if (cpu_wants_rethunk_at(ip))
__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
else
memcpy(ip, retq, sizeof(retq));
@@ -422,8 +422,6 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* ALLOC_TRAMP flags lets us know we created it */
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
set_vm_flush_reset_perms(trampoline);
if (likely(system_state != SYSTEM_BOOTING))
set_memory_ro((unsigned long)trampoline, npages);
set_memory_x((unsigned long)trampoline, npages);


@@ -403,7 +403,6 @@ void *alloc_insn_page(void)
if (!page)
return NULL;
set_vm_flush_reset_perms(page);
/*
* First make the page read-only, and only then make it executable to
* prevent it from being W+X in between.
@@ -462,50 +461,26 @@ static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(kprobe_emulate_call);
static nokprobe_inline
void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
if (cond)
ip += p->ainsn.rel32;
ip += p->ainsn.rel32;
int3_emulate_jmp(regs, ip);
}
static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
{
__kprobe_emulate_jmp(p, regs, true);
}
NOKPROBE_SYMBOL(kprobe_emulate_jmp);
static const unsigned long jcc_mask[6] = {
[0] = X86_EFLAGS_OF,
[1] = X86_EFLAGS_CF,
[2] = X86_EFLAGS_ZF,
[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
[4] = X86_EFLAGS_SF,
[5] = X86_EFLAGS_PF,
};
static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
{
bool invert = p->ainsn.jcc.type & 1;
bool match;
unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
if (p->ainsn.jcc.type < 0xc) {
match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
} else {
match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
if (p->ainsn.jcc.type >= 0xe)
match = match || (regs->flags & X86_EFLAGS_ZF);
}
__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32);
}
NOKPROBE_SYMBOL(kprobe_emulate_jcc);
static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
{
unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
bool match;
if (p->ainsn.loop.type != 3) { /* LOOP* */
@@ -533,7 +508,9 @@ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
else if (p->ainsn.loop.type == 1) /* LOOPE */
match = match && (regs->flags & X86_EFLAGS_ZF);
__kprobe_emulate_jmp(p, regs, match);
if (match)
ip += p->ainsn.rel32;
int3_emulate_jmp(regs, ip);
}
NOKPROBE_SYMBOL(kprobe_emulate_loop);


@@ -73,10 +73,10 @@ void *module_alloc(unsigned long size)
return NULL;
p = __vmalloc_node_range(size, MODULE_ALIGN,
MODULES_VADDR + get_module_load_offset(),
MODULES_END, GFP_KERNEL,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
MODULES_VADDR + get_module_load_offset(),
MODULES_END, GFP_KERNEL, PAGE_KERNEL,
VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
__builtin_return_address(0));
if (p && (kasan_module_alloc(p, size) < 0)) {
vfree(p);
return NULL;
@@ -278,10 +278,15 @@ int module_finalize(const Elf_Ehdr *hdr,
returns = s;
}
its_init_mod(me);
if (retpolines) {
void *rseg = (void *)retpolines->sh_addr;
apply_retpolines(rseg, rseg + retpolines->sh_size);
}
its_fini_mod(me);
if (returns) {
void *rseg = (void *)returns->sh_addr;
apply_returns(rseg, rseg + returns->sh_size);
@@ -317,4 +322,5 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
alternatives_smp_module_del(mod);
its_free_mod(mod);
}


@@ -825,6 +825,11 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
*/
static __cpuidle void mwait_idle(void)
{
if (need_resched())
return;
x86_idle_clear_cpu_buffers();
if (!current_set_polling_and_test()) {
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
mb(); /* quirk */
@@ -833,13 +838,17 @@ static __cpuidle void mwait_idle(void)
}
__monitor((void *)&current_thread_info()->flags, 0, 0);
if (!need_resched())
__sti_mwait(0, 0);
else
if (need_resched()) {
raw_local_irq_enable();
goto out;
}
__sti_mwait(0, 0);
} else {
raw_local_irq_enable();
}
out:
__current_clr_polling();
}


@@ -41,7 +41,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
break;
case RET:
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
if (cpu_wants_rethunk_at(insn))
code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
else
code = &retinsn;


@@ -538,6 +538,14 @@ INIT_PER_CPU(irq_stack_backing_store);
"SRSO function pair won't alias");
#endif
#ifdef CONFIG_MITIGATION_ITS
. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline");
. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
#endif
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_KEXEC_CORE


@@ -500,6 +500,15 @@ void kvm_set_cpu_caps(void)
*/
kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
if (cpu_feature_enabled(X86_FEATURE_VERW_CLEAR))
kvm_cpu_cap_set(X86_FEATURE_VERW_CLEAR);
if (cpu_feature_enabled(X86_FEATURE_TSA_SQ_NO))
kvm_cpu_cap_set(X86_FEATURE_TSA_SQ_NO);
if (cpu_feature_enabled(X86_FEATURE_TSA_L1_NO))
kvm_cpu_cap_set(X86_FEATURE_TSA_L1_NO);
kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
@@ -810,7 +819,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = 0;
break;
case 0x80000000:
entry->eax = min(entry->eax, 0x8000001f);
entry->eax = min(entry->eax, 0x80000021);
break;
case 0x80000001:
entry->ebx &= ~GENMASK(27, 16);
@@ -875,6 +884,26 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
if (!boot_cpu_has(X86_FEATURE_SEV))
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
case 0x80000020:
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
case 0x80000021:
entry->ebx = entry->edx = 0;
/*
* Pass down these bits:
* EAX 0 NNDBP, Processor ignores nested data breakpoints
* EAX 2 LAS, LFENCE always serializing
* EAX 5 VERW_CLEAR, mitigate TSA
* EAX 6 NSCB, Null selector clear base
*
* Other defined bits are for MSRs that KVM does not expose:
* EAX 3 SPCL, SMM page configuration lock
* EAX 13 PCMSR, Prefetch control MSR
*/
cpuid_entry_override(entry, CPUID_8000_0021_EAX);
entry->eax &= BIT(0) | BIT(2) | BIT(5) | BIT(6);
cpuid_entry_override(entry, CPUID_8000_0021_ECX);
break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
/*Just support up to 0xC0000004 now*/
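As a quick concretization of the EAX mask applied in the 0x80000021 leaf above, the four passed-through bits combine to 0x65. A standalone sketch of that arithmetic (BIT() redefined locally for userspace):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	/* NNDBP (0) | LAS (2) | VERW_CLEAR (5) | NSCB (6), as listed above */
	unsigned int mask = BIT(0) | BIT(2) | BIT(5) | BIT(6);

	printf("0x%02x\n", mask); /* prints 0x65 */
	return 0;
}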


@@ -64,6 +64,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_7_EDX] = { 7, 0, CPUID_EDX},
[CPUID_7_1_EAX] = { 7, 1, CPUID_EAX},
[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
[CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
};
/*


@@ -77,6 +77,9 @@ SYM_FUNC_START(__svm_vcpu_run)
/* "POP" @vmcb to RAX. */
pop %_ASM_AX
/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS
/* Enter guest mode */
sti
1: vmload %_ASM_AX


@@ -6810,7 +6810,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();
x86_clear_cpu_buffers();
vmx_disable_fb_clear(vmx);


@@ -1390,7 +1390,7 @@ static unsigned int num_msr_based_features;
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR)
ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_ITS_NO)
static u64 kvm_get_arch_capabilities(void)
{
@@ -1429,6 +1429,8 @@ static u64 kvm_get_arch_capabilities(void)
data |= ARCH_CAP_MDS_NO;
if (!boot_cpu_has_bug(X86_BUG_RFDS))
data |= ARCH_CAP_RFDS_NO;
if (!boot_cpu_has_bug(X86_BUG_ITS))
data |= ARCH_CAP_ITS_NO;
if (!boot_cpu_has(X86_FEATURE_RTM)) {
/*


@@ -255,6 +255,45 @@ SYM_FUNC_START(entry_untrain_ret)
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)
#ifdef CONFIG_MITIGATION_ITS
.macro ITS_THUNK reg
SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
UNWIND_HINT_EMPTY
ANNOTATE_NOENDBR
ANNOTATE_RETPOLINE_SAFE
jmp *%\reg
int3
.align 32, 0xcc /* fill to the end of the line */
.skip 32, 0xcc /* skip to the next upper half */
.endm
/* ITS mitigation requires thunks be aligned to upper half of cacheline */
.align 64, 0xcc
.skip 32, 0xcc
SYM_CODE_START(__x86_indirect_its_thunk_array)
#define GEN(reg) ITS_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN
.align 64, 0xcc
SYM_CODE_END(__x86_indirect_its_thunk_array)
.align 64, 0xcc
.skip 32, 0xcc
SYM_CODE_START(its_return_thunk)
UNWIND_HINT_FUNC
ANNOTATE_NOENDBR
ANNOTATE_UNRET_SAFE
ret
int3
SYM_CODE_END(its_return_thunk)
EXPORT_SYMBOL(its_return_thunk)
#endif /* CONFIG_MITIGATION_ITS */
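The .align 64, 0xcc / .skip 32, 0xcc pairs above place every ITS thunk at offset 32 within its 64-byte cacheline, which is exactly what the "& 0x20" linker assertions check. A standalone C sketch of that invariant (illustrative only, using a hypothetical address):

#include <stdint.h>
#include <stdio.h>

/* True when an address starts in the upper 32 bytes of its 64-byte
 * cacheline, mirroring the "& 0x20" vmlinux.lds assertions. */
static int starts_in_upper_half(uintptr_t addr)
{
	return (addr & 0x20) != 0;
}

int main(void)
{
	uintptr_t base = 0x1000;      /* hypothetical cacheline-aligned base */
	uintptr_t thunk = base + 32;  /* effect of .align 64 + .skip 32 */

	printf("offset in line: %lu, upper half: %d\n",
	       (unsigned long)(thunk % 64), starts_in_upper_half(thunk));
	return 0;
}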
SYM_CODE_START(__x86_return_thunk)
UNWIND_HINT_FUNC
ANNOTATE_NOENDBR


@@ -387,7 +387,11 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
int cnt = 0;
#ifdef CONFIG_RETPOLINE
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
if (IS_ENABLED(CONFIG_MITIGATION_ITS) &&
cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
OPTIMIZER_HIDE_VAR(reg);
emit_jump(&prog, its_static_thunk(reg), ip);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
EMIT_LFENCE();
EMIT2(0xFF, 0xE0 + reg);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
@@ -404,7 +408,7 @@ static void emit_return(u8 **pprog, u8 *ip)
u8 *prog = *pprog;
int cnt = 0;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
if (cpu_wants_rethunk()) {
emit_jump(&prog, x86_return_thunk, ip);
} else {
EMIT1(0xC3); /* ret */


@@ -20,6 +20,9 @@
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* Do not call this directly. Declared for export type visibility. */
extern __visible __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
/**
* csum_fold - Fold and invert a 32bit checksum.
* sum: 32bit unfolded sum


@@ -128,8 +128,11 @@ static void round_robin_cpu(unsigned int tsk_index)
static void exit_round_robin(unsigned int tsk_index)
{
struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
tsk_in_cpu[tsk_index] = -1;
if (tsk_in_cpu[tsk_index] != -1) {
cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
tsk_in_cpu[tsk_index] = -1;
}
}
static unsigned int idle_pct = 5; /* percentage */


@@ -483,6 +483,13 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
if (this_walk_state->num_operands < obj_desc->method.param_count) {
ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]",
acpi_ut_get_node_name(method_node)));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
}
/* Init for new method, possibly wait on method mutex */
status =


@@ -255,23 +255,10 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) {
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
break;
}
val->intval = battery->rate_now * 1000;
/*
* When discharging, the current should be reported as a
* negative number as per the power supply class interface
* definition.
*/
if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
(battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
acpi_battery_handle_discharging(battery)
== POWER_SUPPLY_STATUS_DISCHARGING)
val->intval = -val->intval;
else
val->intval = battery->rate_now * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:


@@ -27,7 +27,7 @@
#include <scsi/scsi_host.h>
#include <linux/dmi.h>
#ifdef CONFIG_X86_32
#if defined(CONFIG_X86) && defined(CONFIG_X86_32)
#include <asm/msr.h>
static int use_msr;
module_param_named(msr, use_msr, int, 0644);


@@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb)))
return -ENOMEM;
error = -EINVAL;
@@ -1863,6 +1865,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
paddr = dma_map_single(&card->pcidev->dev, skb->data,
skb_end_pointer(skb) - skb->data,
DMA_FROM_DEVICE);
if (dma_mapping_error(&card->pcidev->dev, paddr))
goto outpoolrm;
IDT77252_PRV_PADDR(skb) = paddr;
if (push_rx_skb(card, skb, queue)) {
@@ -1877,6 +1881,7 @@ outunmap:
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
outpoolrm:
handle = IDT77252_PRV_POOL(skb);
card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;


@@ -597,6 +597,17 @@ ssize_t __weak cpu_show_reg_file_data_sampling(struct device *dev,
return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_indirect_target_selection(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
}
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -611,6 +622,8 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -627,6 +640,8 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_gather_data_sampling.attr,
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_reg_file_data_sampling.attr,
&dev_attr_indirect_target_selection.attr,
&dev_attr_tsa.attr,
NULL
};
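With these attributes registered, the new mitigation states are visible from userspace. A minimal reader for the two files this series adds (standard sysfs paths, minimal error handling):

#include <stdio.h>

static void show(const char *name)
{
	char path[128], line[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/vulnerabilities/%s", name);
	f = fopen(path, "r");
	if (!f || !fgets(line, sizeof(line), f))
		printf("%s: <unavailable>\n", name);
	else
		printf("%s: %s", name, line);
	if (f)
		fclose(f);
}

int main(void)
{
	show("indirect_target_selection");
	show("tsa");
	return 0;
}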


@@ -591,7 +591,7 @@ retry:
goto retry;
}
ret = dma_fence_wait_timeout(fence, intr, ret);
ret = dma_fence_wait_timeout(fence, intr, timeout);
dma_fence_put(fence);
if (ret > 0 && wait_all && (i + 1 < shared_count))
goto retry;


@@ -2844,6 +2844,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
return -EINVAL;
}
xdev->common.directions |= chan->direction;
/* Request the interrupt */
chan->irq = irq_of_parse_and_map(node, chan->tdest);
err = request_irq(chan->irq, xdev->dma_config->irq_handler,


@@ -156,7 +156,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0;
packet->bitfields2.extended_engine_sel =
extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =


@@ -608,15 +608,18 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
if (ret)
return ret;
phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000,
mipi_dsi_pixel_format_to_bpp(output->dev->format),
nlanes, phy_cfg);
ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
mipi_dsi_pixel_format_to_bpp(output->dev->format),
nlanes, phy_cfg);
if (ret)
return ret;
ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
if (ret)
@@ -786,8 +789,9 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
unsigned long tx_byte_period;
struct cdns_dsi_cfg dsi_cfg;
u32 tmp, reg_wakeup, div;
u32 tmp, reg_wakeup, div, status;
int nlanes;
int i;
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
@@ -800,6 +804,19 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
/*
* Now that the DSI Link and DSI Phy are initialized,
* wait for the CLK and Data Lanes to be ready.
*/
tmp = CLK_LANE_RDY;
for (i = 0; i < nlanes; i++)
tmp |= DATA_LANE_RDY(i);
if (readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
(tmp == (status & tmp)), 100, 500000))
dev_err(dsi->base.dev,
"Timed Out: DSI-DPhy Clock and Data Lanes not ready.\n");
writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
dsi->regs + VID_HSIZE1);
writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
@@ -960,7 +977,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DSI);
} else {
bridge = of_drm_find_bridge(dev->dev.of_node);
bridge = of_drm_find_bridge(np);
if (!bridge)
bridge = ERR_PTR(-EINVAL);
}


@@ -595,6 +595,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
if (!ctx->drm_dev)
goto out;
/* check if crtc and vblank have been initialized properly */
if (!drm_dev_has_vblank(ctx->drm_dev))
goto out;
if (!ctx->i80_if) {
drm_crtc_handle_vblank(&ctx->crtc->base);


@@ -182,6 +182,7 @@ struct fimd_context {
u32 i80ifcon;
bool i80_if;
bool suspended;
bool dp_clk_enabled;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
atomic_t win_updated;
@@ -1003,7 +1004,18 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
struct fimd_context *ctx = container_of(clk, struct fimd_context,
dp_clk);
u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
if (enable == ctx->dp_clk_enabled)
return;
if (enable)
pm_runtime_resume_and_get(ctx->dev);
ctx->dp_clk_enabled = enable;
writel(val, ctx->regs + DP_MIE_CLKCON);
if (!enable)
pm_runtime_put(ctx->dev);
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {


@@ -576,7 +576,6 @@ static int ring_context_alloc(struct intel_context *ce)
/* One ringbuffer to rule them all */
GEM_BUG_ON(!engine->legacy.ring);
ce->ring = engine->legacy.ring;
ce->timeline = intel_timeline_get(engine->legacy.timeline);
GEM_BUG_ON(ce->state);
if (engine->context_size) {
@@ -591,6 +590,8 @@ static int ring_context_alloc(struct intel_context *ce)
__set_bit(CONTEXT_VALID_BIT, &ce->flags);
}
ce->timeline = intel_timeline_get(engine->legacy.timeline);
return 0;
}


@@ -71,8 +71,8 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
request = mock_request(rcs0(i915)->kernel_context, HZ / 10);
if (!request)
return -ENOMEM;
if (IS_ERR(request))
return PTR_ERR(request);
i915_request_add(request);
@@ -89,8 +89,8 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
if (IS_ERR(request))
return PTR_ERR(request);
i915_request_get(request);
@@ -158,8 +158,8 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
request = mock_request(rcs0(i915)->kernel_context, T);
if (!request)
return -ENOMEM;
if (IS_ERR(request))
return PTR_ERR(request);
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
@@ -213,8 +213,8 @@ static int igt_request_rewind(void *arg)
GEM_BUG_ON(IS_ERR(ce));
request = mock_request(ce, 2 * HZ);
intel_context_put(ce);
if (!request) {
err = -ENOMEM;
if (IS_ERR(request)) {
err = PTR_ERR(request);
goto err_context_0;
}
@@ -227,8 +227,8 @@ static int igt_request_rewind(void *arg)
GEM_BUG_ON(IS_ERR(ce));
vip = mock_request(ce, 0);
intel_context_put(ce);
if (!vip) {
err = -ENOMEM;
if (IS_ERR(vip)) {
err = PTR_ERR(vip);
goto err_context_1;
}


@@ -35,7 +35,7 @@ mock_request(struct intel_context *ce, unsigned long delay)
/* NB the i915->requests slab cache is enlarged to fit mock_request */
request = intel_context_create_request(ce);
if (IS_ERR(request))
return NULL;
return request;
request->mock.delay = delay;
return request;


@@ -1134,10 +1134,16 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
if (wgrp->dc == dc->pipe) {
for (j = 0; j < wgrp->num_windows; j++) {
unsigned int index = wgrp->windows[j];
enum drm_plane_type type;
if (primary)
type = DRM_PLANE_TYPE_OVERLAY;
else
type = DRM_PLANE_TYPE_PRIMARY;
plane = tegra_shared_plane_create(drm, dc,
wgrp->index,
index);
index, type);
if (IS_ERR(plane))
return plane;
@@ -1145,10 +1151,8 @@ static struct drm_plane *tegra_dc_add_shared_planes(struct drm_device *drm,
* Choose the first shared plane owned by this
* head as the primary plane.
*/
if (!primary) {
plane->type = DRM_PLANE_TYPE_PRIMARY;
if (!primary)
primary = plane;
}
}
}
}
@@ -1202,7 +1206,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
tegra_crtc_atomic_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &state->base);
if (state)
__drm_atomic_helper_crtc_reset(crtc, &state->base);
else
__drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *


@@ -551,9 +551,9 @@ static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
unsigned int index)
unsigned int index,
enum drm_plane_type type)
{
enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
/* planes can be assigned to arbitrary CRTCs */


@@ -81,7 +81,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub);
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
unsigned int wgrp,
unsigned int index);
unsigned int index,
enum drm_plane_type type);
int tegra_display_hub_atomic_check(struct drm_device *drm,
struct drm_atomic_state *state);


@@ -115,9 +115,9 @@ static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
drm_dev_unplug(dev);
drm_kms_helper_poll_fini(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
}
/*


@@ -37,6 +37,12 @@ struct v3d_queue_state {
u64 emit_seqno;
};
enum v3d_irq {
V3D_CORE_IRQ,
V3D_HUB_IRQ,
V3D_MAX_IRQS,
};
struct v3d_dev {
struct drm_device drm;
@@ -46,6 +52,7 @@ struct v3d_dev {
int ver;
bool single_irq_line;
int irq[V3D_MAX_IRQS];
void __iomem *hub_regs;
void __iomem *core_regs[3];
void __iomem *bridge_regs;


@@ -120,6 +120,8 @@ v3d_reset(struct v3d_dev *v3d)
if (false)
v3d_idle_axi(v3d, 0);
v3d_irq_disable(v3d);
v3d_idle_gca(v3d);
v3d_reset_v3d(v3d);


@@ -218,7 +218,7 @@ v3d_hub_irq(int irq, void *arg)
int
v3d_irq_init(struct v3d_dev *v3d)
{
int irq1, ret, core;
int irq, ret, core;
INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
@@ -229,17 +229,24 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
return irq1;
if (irq1 > 0) {
ret = devm_request_irq(v3d->drm.dev, irq1,
irq = platform_get_irq(v3d_to_pdev(v3d), 1);
if (irq == -EPROBE_DEFER)
return irq;
if (irq > 0) {
v3d->irq[V3D_CORE_IRQ] = irq;
ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
v3d_irq, IRQF_SHARED,
"v3d_core0", v3d);
if (ret)
goto fail;
ret = devm_request_irq(v3d->drm.dev,
platform_get_irq(v3d_to_pdev(v3d), 0),
irq = platform_get_irq(v3d_to_pdev(v3d), 0);
if (irq < 0)
return irq;
v3d->irq[V3D_HUB_IRQ] = irq;
ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_HUB_IRQ],
v3d_hub_irq, IRQF_SHARED,
"v3d_hub", v3d);
if (ret)
@@ -247,8 +254,12 @@ v3d_irq_init(struct v3d_dev *v3d)
} else {
v3d->single_irq_line = true;
ret = devm_request_irq(v3d->drm.dev,
platform_get_irq(v3d_to_pdev(v3d), 0),
irq = platform_get_irq(v3d_to_pdev(v3d), 0);
if (irq < 0)
return irq;
v3d->irq[V3D_CORE_IRQ] = irq;
ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
v3d_irq, IRQF_SHARED,
"v3d", v3d);
if (ret)
@@ -283,12 +294,19 @@ void
v3d_irq_disable(struct v3d_dev *v3d)
{
int core;
int i;
/* Disable all interrupts. */
for (core = 0; core < v3d->cores; core++)
V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);
/* Finish any interrupt handler still in flight. */
for (i = 0; i < V3D_MAX_IRQS; i++) {
if (v3d->irq[i])
synchronize_irq(v3d->irq[i]);
}
/* Clear any pending interrupts we might have left. */
for (core = 0; core < v3d->cores; core++)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);


@@ -278,6 +278,8 @@
#define USB_DEVICE_ID_ASUS_AK1D 0x1125
#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408
#define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421
#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA 0xb824
#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2 0xb82c
#define USB_VENDOR_ID_CHUNGHWAT 0x2247
#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
@@ -1365,4 +1367,7 @@
#define USB_VENDOR_ID_SIGNOTEC 0x2133
#define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011 0x0018
#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a
#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155
#endif


@@ -726,6 +726,8 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
@@ -873,6 +875,7 @@ static const struct hid_device_id hid_ignore_list[] = {
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) },
{ }
};


@@ -2020,14 +2020,18 @@ static int wacom_initialize_remotes(struct wacom *wacom)
remote->remote_dir = kobject_create_and_add("wacom_remote",
&wacom->hdev->dev.kobj);
if (!remote->remote_dir)
if (!remote->remote_dir) {
kfifo_free(&remote->remote_fifo);
return -ENOMEM;
}
error = sysfs_create_files(remote->remote_dir, remote_unpair_attrs);
if (error) {
hid_err(wacom->hdev,
"cannot create sysfs group err: %d\n", error);
kfifo_free(&remote->remote_fifo);
kobject_put(remote->remote_dir);
return error;
}


@@ -106,7 +106,9 @@ const struct vmbus_device vmbus_devs[] = {
},
/* File copy */
{ .dev_type = HV_FCOPY,
/* fcopy has always used a 16 KB ring buffer, which has worked well for many years */
{ .pref_ring_size = 0x4000,
.dev_type = HV_FCOPY,
HV_FCOPY_GUID,
.perf_device = false,
},
@@ -123,11 +125,18 @@ const struct vmbus_device vmbus_devs[] = {
.perf_device = false,
},
/* Unknown GUID */
{ .dev_type = HV_UNKNOWN,
/*
* Unknown GUID
* A 64 KB ring buffer plus a 4 KB header should be sufficient for any Hyper-V
* device apart from HV_NIC and HV_SCSI. This avoids falling back to the much
* bigger (2 MB) default ring size for unknown devices.
*/
{ .pref_ring_size = 0x11000,
.dev_type = HV_UNKNOWN,
.perf_device = false,
},
};
EXPORT_SYMBOL_GPL(vmbus_devs);
static const struct {
guid_t guid;
@@ -429,7 +438,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
* init_vp_index() can (re-)use the CPU.
*/
if (hv_is_perf_channel(channel))
hv_clear_alloced_cpu(channel->target_cpu);
hv_clear_allocated_cpu(channel->target_cpu);
/*
* Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
@@ -579,6 +588,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
*/
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (guid_equal(&channel->offermsg.offer.if_type,
&newchannel->offermsg.offer.if_type) &&
guid_equal(&channel->offermsg.offer.if_instance,
&newchannel->offermsg.offer.if_instance)) {
fnew = false;
newchannel->primary_channel = channel;
break;
}
}
init_vp_index(newchannel);
/* Remember the channels that should be cleaned up upon suspend. */
@@ -591,16 +611,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
*/
atomic_dec(&vmbus_connection.offer_in_progress);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (guid_equal(&channel->offermsg.offer.if_type,
&newchannel->offermsg.offer.if_type) &&
guid_equal(&channel->offermsg.offer.if_instance,
&newchannel->offermsg.offer.if_instance)) {
fnew = false;
break;
}
}
if (fnew) {
list_add_tail(&newchannel->listentry,
&vmbus_connection.chn_list);
@@ -622,7 +632,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
/*
* Process the sub-channel.
*/
newchannel->primary_channel = channel;
list_add_tail(&newchannel->sc_list, &channel->sc_list);
}
@@ -658,6 +667,30 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
queue_work(wq, &newchannel->add_channel_work);
}
/*
* Check whether the given CPU is already used by other channels of the same device.
* It should only be called by init_vp_index().
*/
static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
{
struct vmbus_channel *primary = chn->primary_channel;
struct vmbus_channel *sc;
lockdep_assert_held(&vmbus_connection.channel_mutex);
if (!primary)
return false;
if (primary->target_cpu == cpu)
return true;
list_for_each_entry(sc, &primary->sc_list, sc_list)
if (sc != chn && sc->target_cpu == cpu)
return true;
return false;
}
/*
* We use this state to statically distribute the channel interrupt load.
*/
@@ -677,8 +710,9 @@ static int next_numa_node_id;
static void init_vp_index(struct vmbus_channel *channel)
{
bool perf_chn = hv_is_perf_channel(channel);
u32 i, ncpu = num_online_cpus();
cpumask_var_t available_mask;
struct cpumask *alloced_mask;
struct cpumask *allocated_mask;
u32 target_cpu;
int numa_node;
@@ -695,35 +729,42 @@ static void init_vp_index(struct vmbus_channel *channel)
*/
channel->target_cpu = VMBUS_CONNECT_CPU;
if (perf_chn)
hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
hv_set_allocated_cpu(VMBUS_CONNECT_CPU);
return;
}
while (true) {
numa_node = next_numa_node_id++;
if (numa_node == nr_node_ids) {
next_numa_node_id = 0;
continue;
for (i = 1; i <= ncpu + 1; i++) {
while (true) {
numa_node = next_numa_node_id++;
if (numa_node == nr_node_ids) {
next_numa_node_id = 0;
continue;
}
if (cpumask_empty(cpumask_of_node(numa_node)))
continue;
break;
}
if (cpumask_empty(cpumask_of_node(numa_node)))
continue;
break;
allocated_mask = &hv_context.hv_numa_map[numa_node];
if (cpumask_weight(allocated_mask) ==
cpumask_weight(cpumask_of_node(numa_node))) {
/*
* We have cycled through all the CPUs in the node;
* reset the allocated map.
*/
cpumask_clear(allocated_mask);
}
cpumask_xor(available_mask, allocated_mask,
cpumask_of_node(numa_node));
target_cpu = cpumask_first(available_mask);
cpumask_set_cpu(target_cpu, allocated_mask);
if (channel->offermsg.offer.sub_channel_index >= ncpu ||
i > ncpu || !hv_cpuself_used(target_cpu, channel))
break;
}
alloced_mask = &hv_context.hv_numa_map[numa_node];
if (cpumask_weight(alloced_mask) ==
cpumask_weight(cpumask_of_node(numa_node))) {
/*
* We have cycled through all the CPUs in the node;
* reset the alloced map.
*/
cpumask_clear(alloced_mask);
}
cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
target_cpu = cpumask_first(available_mask);
cpumask_set_cpu(target_cpu, alloced_mask);
channel->target_cpu = target_cpu;
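The reworked loop above retries up to ncpu + 1 times so a sub-channel avoids CPUs already claimed by siblings of the same device, yet still terminates once every candidate has been tried. A greatly simplified standalone model of that policy (single pool, no NUMA masks, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

static bool used_by_same_device(const bool used[], unsigned int cpu)
{
	return used[cpu];
}

static unsigned int pick_cpu(bool used[])
{
	static unsigned int next;
	unsigned int i, cand = 0;

	for (i = 1; i <= NCPUS + 1; i++) {
		cand = next++ % NCPUS;
		if (!used_by_same_device(used, cand))
			break; /* fresh CPU found */
	}
	used[cand] = true; /* accept a reused CPU once all were tried */
	return cand;
}

int main(void)
{
	bool used[NCPUS] = { false };
	unsigned int ch;

	for (ch = 0; ch < 6; ch++)
		printf("channel %u -> cpu %u\n", ch, pick_cpu(used));
	return 0;
}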


@@ -406,7 +406,12 @@ static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
return vmbus_devs[channel->device_id].perf_device;
}
static inline bool hv_is_alloced_cpu(unsigned int cpu)
static inline size_t hv_dev_ring_size(struct vmbus_channel *channel)
{
return vmbus_devs[channel->device_id].pref_ring_size;
}
static inline bool hv_is_allocated_cpu(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
@@ -428,23 +433,23 @@ static inline bool hv_is_alloced_cpu(unsigned int cpu)
return false;
}
static inline void hv_set_alloced_cpu(unsigned int cpu)
static inline void hv_set_allocated_cpu(unsigned int cpu)
{
cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}
static inline void hv_clear_alloced_cpu(unsigned int cpu)
static inline void hv_clear_allocated_cpu(unsigned int cpu)
{
if (hv_is_alloced_cpu(cpu))
if (hv_is_allocated_cpu(cpu))
return;
cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}
static inline void hv_update_alloced_cpus(unsigned int old_cpu,
static inline void hv_update_allocated_cpus(unsigned int old_cpu,
unsigned int new_cpu)
{
hv_set_alloced_cpu(new_cpu);
hv_clear_alloced_cpu(old_cpu);
hv_set_allocated_cpu(new_cpu);
hv_clear_allocated_cpu(old_cpu);
}
#ifdef CONFIG_HYPERV_TESTING


@@ -1790,7 +1790,7 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
/* See init_vp_index(). */
if (hv_is_perf_channel(channel))
hv_update_alloced_cpus(origin_cpu, target_cpu);
hv_update_allocated_cpus(origin_cpu, target_cpu);
/* Currently set only for storvsc channels. */
if (channel->change_target_cpu_callback) {


@@ -34,16 +34,21 @@ enum chips { max34440, max34441, max34446, max34451, max34460, max34461 };
/*
* The whole max344* family has IOUT_OC_WARN_LIMIT and IOUT_OC_FAULT_LIMIT
* swapped from the standard pmbus spec addresses.
* For max34451, versions MAX34451ETNA6+ and later have this issue fixed.
*/
#define MAX34440_IOUT_OC_WARN_LIMIT 0x46
#define MAX34440_IOUT_OC_FAULT_LIMIT 0x4A
#define MAX34451ETNA6_MFR_REV 0x0012
#define MAX34451_MFR_CHANNEL_CONFIG 0xe4
#define MAX34451_MFR_CHANNEL_CONFIG_SEL_MASK 0x3f
struct max34440_data {
int id;
struct pmbus_driver_info info;
u8 iout_oc_warn_limit;
u8 iout_oc_fault_limit;
};
#define to_max34440_data(x) container_of(x, struct max34440_data, info)
@@ -60,11 +65,11 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
MAX34440_IOUT_OC_FAULT_LIMIT);
data->iout_oc_fault_limit);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
ret = pmbus_read_word_data(client, page, phase,
MAX34440_IOUT_OC_WARN_LIMIT);
data->iout_oc_warn_limit);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page, phase,
@@ -133,11 +138,11 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
switch (reg) {
case PMBUS_IOUT_OC_FAULT_LIMIT:
ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_FAULT_LIMIT,
ret = pmbus_write_word_data(client, page, data->iout_oc_fault_limit,
word);
break;
case PMBUS_IOUT_OC_WARN_LIMIT:
ret = pmbus_write_word_data(client, page, MAX34440_IOUT_OC_WARN_LIMIT,
ret = pmbus_write_word_data(client, page, data->iout_oc_warn_limit,
word);
break;
case PMBUS_VIRT_RESET_POUT_HISTORY:
@@ -235,6 +240,25 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
*/
int page, rv;
bool max34451_na6 = false;
rv = i2c_smbus_read_word_data(client, PMBUS_MFR_REVISION);
if (rv < 0)
return rv;
if (rv >= MAX34451ETNA6_MFR_REV) {
max34451_na6 = true;
data->info.format[PSC_VOLTAGE_IN] = direct;
data->info.format[PSC_CURRENT_IN] = direct;
data->info.m[PSC_VOLTAGE_IN] = 1;
data->info.b[PSC_VOLTAGE_IN] = 0;
data->info.R[PSC_VOLTAGE_IN] = 3;
data->info.m[PSC_CURRENT_IN] = 1;
data->info.b[PSC_CURRENT_IN] = 0;
data->info.R[PSC_CURRENT_IN] = 2;
data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
}
for (page = 0; page < 16; page++) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
@@ -251,16 +275,30 @@ static int max34451_set_supported_funcs(struct i2c_client *client,
case 0x20:
data->info.func[page] = PMBUS_HAVE_VOUT |
PMBUS_HAVE_STATUS_VOUT;
if (max34451_na6)
data->info.func[page] |= PMBUS_HAVE_VIN |
PMBUS_HAVE_STATUS_INPUT;
break;
case 0x21:
data->info.func[page] = PMBUS_HAVE_VOUT;
if (max34451_na6)
data->info.func[page] |= PMBUS_HAVE_VIN;
break;
case 0x22:
data->info.func[page] = PMBUS_HAVE_IOUT |
PMBUS_HAVE_STATUS_IOUT;
if (max34451_na6)
data->info.func[page] |= PMBUS_HAVE_IIN |
PMBUS_HAVE_STATUS_INPUT;
break;
case 0x23:
data->info.func[page] = PMBUS_HAVE_IOUT;
if (max34451_na6)
data->info.func[page] |= PMBUS_HAVE_IIN;
break;
default:
break;
@@ -494,6 +532,8 @@ static int max34440_probe(struct i2c_client *client)
return -ENOMEM;
data->id = i2c_match_id(max34440_id, client)->driver_data;
data->info = max34440_info[data->id];
data->iout_oc_fault_limit = MAX34440_IOUT_OC_FAULT_LIMIT;
data->iout_oc_warn_limit = MAX34440_IOUT_OC_WARN_LIMIT;
if (data->id == max34451) {
rv = max34451_set_supported_funcs(client, data);
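For reference, PMBus "direct" readings decode as X = (Y * 10^-R - b) / m; with the coefficients set above for MAX34451ETNA6+ voltage input (m = 1, b = 0, R = 3), a raw word of 12345 decodes to 12.345 V. A small sketch of that arithmetic (assumes non-negative R):

#include <stdio.h>

static double pow10i(int e)
{
	double v = 1.0;

	while (e-- > 0)
		v *= 10.0;
	return v;
}

/* PMBus direct format: X = (Y * 10^-R - b) / m */
static double pmbus_direct_decode(long y, long m, long b, int r)
{
	return (y / pow10i(r) - b) / (double)m;
}

int main(void)
{
	/* voltage input per the coefficients above: m=1, b=0, R=3 */
	printf("%.3f V\n", pmbus_direct_decode(12345, 1, 0, 3));
	return 0;
}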


@@ -111,6 +111,11 @@ static u32 osif_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
/* prevent invalid 0-length usb_control_msg */
static const struct i2c_adapter_quirks osif_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN_READ,
};
static const struct i2c_algorithm osif_algorithm = {
.master_xfer = osif_xfer,
.functionality = osif_func,
@@ -143,6 +148,7 @@ static int osif_probe(struct usb_interface *interface,
priv->adapter.owner = THIS_MODULE;
priv->adapter.class = I2C_CLASS_HWMON;
priv->adapter.quirks = &osif_quirks;
priv->adapter.algo = &osif_algorithm;
priv->adapter.algo_data = priv;
snprintf(priv->adapter.name, sizeof(priv->adapter.name),


@@ -140,6 +140,11 @@ out:
return ret;
}
/* prevent invalid 0-length usb_control_msg */
static const struct i2c_adapter_quirks usb_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN_READ,
};
/* This is the actual algorithm we define */
static const struct i2c_algorithm usb_algorithm = {
.master_xfer = usb_xfer,
@@ -244,6 +249,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
/* setup i2c adapter description */
dev->adapter.owner = THIS_MODULE;
dev->adapter.class = I2C_CLASS_HWMON;
dev->adapter.quirks = &usb_quirks;
dev->adapter.algo = &usb_algorithm;
dev->adapter.algo_data = dev;
snprintf(dev->adapter.name, sizeof(dev->adapter.name),
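Both adapters set I2C_AQ_NO_ZERO_LEN_READ so the I2C core can reject zero-length reads before they become invalid zero-length usb_control_msg() calls. A standalone model of that kind of pre-check (simplified sketch, not the kernel's implementation):

#include <stdbool.h>
#include <stdio.h>

struct xfer_msg {
	unsigned int len;
	bool is_read;
};

/* Reject a zero-length read when the adapter declares the quirk. */
static bool quirk_rejects(const struct xfer_msg *msg, bool no_zero_len_read)
{
	return no_zero_len_read && msg->is_read && msg->len == 0;
}

int main(void)
{
	struct xfer_msg msg = { .len = 0, .is_read = true };

	printf("rejected: %s\n", quirk_rejects(&msg, true) ? "yes" : "no");
	return 0;
}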


@@ -582,7 +582,7 @@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev,
struct {
u32 pressure;
u16 temperature;
u64 timestamp;
aligned_s64 timestamp;
} sample;
int err;


@@ -211,8 +211,7 @@ static void free_cm_id(struct iwcm_id_private *cm_id_priv)
*/
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
if (atomic_dec_and_test(&cm_id_priv->refcount)) {
if (refcount_dec_and_test(&cm_id_priv->refcount)) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
free_cm_id(cm_id_priv);
return 1;
@@ -225,7 +224,7 @@ static void add_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
atomic_inc(&cm_id_priv->refcount);
refcount_inc(&cm_id_priv->refcount);
}
static void rem_ref(struct iw_cm_id *cm_id)
@@ -257,7 +256,7 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
cm_id_priv->id.add_ref = add_ref;
cm_id_priv->id.rem_ref = rem_ref;
spin_lock_init(&cm_id_priv->lock);
atomic_set(&cm_id_priv->refcount, 1);
refcount_set(&cm_id_priv->refcount, 1);
init_waitqueue_head(&cm_id_priv->connect_wait);
init_completion(&cm_id_priv->destroy_comp);
INIT_LIST_HEAD(&cm_id_priv->work_list);
@@ -368,12 +367,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
/*
* CM_ID <-- DESTROYING
*
* Clean up all resources associated with the connection and release
* the initial reference taken by iw_create_cm_id.
*
* Returns true if and only if the last cm_id_priv reference has been dropped.
* Clean up all resources associated with the connection.
*/
static bool destroy_cm_id(struct iw_cm_id *cm_id)
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
struct ib_qp *qp;
@@ -442,20 +438,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
}
return iwcm_deref_id(cm_id_priv);
}
/*
* This function is only called by the application thread and cannot
* be called by the event thread. The function will wait for all
* references to be released on the cm_id and then kfree the cm_id
* object.
* Destroy cm_id. If the cm_id still has other references, wait for all
* references to be released on the cm_id and then release the initial
* reference taken by iw_create_cm_id.
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
if (!destroy_cm_id(cm_id))
struct iwcm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
destroy_cm_id(cm_id);
if (refcount_read(&cm_id_priv->refcount) > 1)
flush_workqueue(iwcm_wq);
iwcm_deref_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -1038,8 +1036,10 @@ static void cm_work_handler(struct work_struct *_work)
if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
ret = process_event(cm_id_priv, &levent);
if (ret)
WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
if (ret) {
destroy_cm_id(&cm_id_priv->id);
WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
}
} else
pr_debug("dropping event %d\n", levent.event);
if (iwcm_deref_id(cm_id_priv))
@@ -1097,7 +1097,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
}
}
atomic_inc(&cm_id_priv->refcount);
refcount_inc(&cm_id_priv->refcount);
if (list_empty(&cm_id_priv->work_list)) {
list_add_tail(&work->list, &cm_id_priv->work_list);
queue_work(iwcm_wq, &work->work);


@@ -52,7 +52,7 @@ struct iwcm_id_private {
wait_queue_head_t connect_wait;
struct list_head work_list;
spinlock_t lock;
atomic_t refcount;
refcount_t refcount;
struct list_head work_free_list;
};
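The atomic_t-to-refcount_t switch above keeps the classic put pattern: the object is freed only on the final reference drop, and refcount_t adds saturation and overflow checking on top. A standalone model of the put path using C11 atomics (sketch, not the kernel's refcount_t):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int ref;
};

/* Drop one reference; free when the previous value was 1 (now zero). */
static void obj_put(struct obj *obj)
{
	if (atomic_fetch_sub(&obj->ref, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(obj);
	}
}

int main(void)
{
	struct obj *obj = malloc(sizeof(*obj));

	if (!obj)
		return 1;
	atomic_init(&obj->ref, 2);
	obj_put(obj); /* still referenced elsewhere */
	obj_put(obj); /* final put frees the object */
	return 0;
}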


@@ -279,7 +279,7 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
*/
goto done;
}
ret = mlx5_lag_query_cong_counters(dev->mdev,
ret = mlx5_lag_query_cong_counters(mdev,
stats->value +
cnts->num_q_counters,
cnts->num_cong_counters,


@@ -1809,6 +1809,7 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
/* Level1 is valid for future use, no need to free */
return -ENOMEM;
INIT_LIST_HEAD(&obj_event->obj_sub_list);
err = xa_insert(&event->object_ids,
key_level2,
obj_event,
@@ -1817,7 +1818,6 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
kfree(obj_event);
return err;
}
INIT_LIST_HEAD(&obj_event->obj_sub_list);
}
return 0;


@@ -1667,6 +1667,33 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
 			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
 }

+static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
+				struct mlx5_core_dev *slave)
+{
+	int err;
+
+	err = mlx5_nic_vport_update_local_lb(master, true);
+	if (err)
+		return err;
+
+	err = mlx5_nic_vport_update_local_lb(slave, true);
+	if (err)
+		goto out;
+
+	return 0;
+
+out:
+	mlx5_nic_vport_update_local_lb(master, false);
+	return err;
+}
+
+static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
+				  struct mlx5_core_dev *slave)
+{
+	mlx5_nic_vport_update_local_lb(slave, false);
+	mlx5_nic_vport_update_local_lb(master, false);
+}
+
 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
 {
 	int err = 0;
@@ -3424,6 +3451,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
 	lockdep_assert_held(&mlx5_ib_multiport_mutex);

+	mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
+
 	mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);

 	spin_lock(&port->mp.mpi_lock);
@@ -3512,6 +3541,10 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,

 	mlx5_ib_init_cong_debugfs(ibdev, port_num);

+	err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
+	if (err)
+		goto unbind;
+
 	return true;

 unbind:

drivers/input/joystick/xpad.c

@@ -147,6 +147,7 @@ static const struct xpad_device {
 	{ 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
 	{ 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
 	{ 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
+	{ 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX },
 	{ 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
 	{ 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
 	{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
@@ -275,6 +276,7 @@ static const struct xpad_device {
 	{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
 	{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
 	{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+	{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -439,6 +441,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOX360_VENDOR(0x045e),	/* Microsoft X-Box 360 controllers */
 	XPAD_XBOXONE_VENDOR(0x045e),	/* Microsoft X-Box One controllers */
 	XPAD_XBOX360_VENDOR(0x046d),	/* Logitech X-Box 360 style controllers */
+	XPAD_XBOX360_VENDOR(0x0502),	/* Acer Inc. Xbox 360 style controllers */
 	XPAD_XBOX360_VENDOR(0x056e),	/* Elecom JC-U3613M */
 	XPAD_XBOX360_VENDOR(0x06a3),	/* Saitek P3600 */
 	XPAD_XBOX360_VENDOR(0x0738),	/* Mad Catz X-Box 360 controllers */
@@ -451,6 +454,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOX360_VENDOR(0x0f0d),	/* Hori Controllers */
 	XPAD_XBOXONE_VENDOR(0x0f0d),	/* Hori Controllers */
 	XPAD_XBOX360_VENDOR(0x1038),	/* SteelSeries Controllers */
+	XPAD_XBOXONE_VENDOR(0x10f5),	/* Turtle Beach Controllers */
 	XPAD_XBOX360_VENDOR(0x11c9),	/* Nacon GC100XF */
 	XPAD_XBOX360_VENDOR(0x11ff),	/* PXN V900 */
 	XPAD_XBOX360_VENDOR(0x1209),	/* Ardwiino Controllers */
@@ -462,6 +466,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOX360_VENDOR(0x15e4),	/* Numark X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x162e),	/* Joytech X-Box 360 controllers */
 	XPAD_XBOX360_VENDOR(0x1689),	/* Razer Onza */
+	XPAD_XBOX360_VENDOR(0x1949),	/* Amazon controllers */
 	XPAD_XBOX360_VENDOR(0x1bad),	/* Harminix Rock Band Guitar and Drums */
 	XPAD_XBOX360_VENDOR(0x20d6),	/* PowerA Controllers */
 	XPAD_XBOXONE_VENDOR(0x20d6),	/* PowerA Controllers */
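
For background: as the hunks above show, each new pad needs two coordinated entries; the xpad_device table supplies the name and XTYPE quirks per VID:PID, while xpad_table is what the USB core actually matches against, keyed per vendor, so omitting the vendor macro means probe() is never called. A trimmed sketch of the two-table pattern (fields abridged from the driver):

    /* Per-device table: drives naming and protocol selection. */
    static const struct xpad_device {
            u16 idVendor;
            u16 idProduct;
            char *name;
            u8 mapping;
            u8 xtype;
    } xpad_device[] = {
            { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX },
            { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
    };

    /* Match table: XPAD_XBOX360_VENDOR() is the driver's USB_DEVICE-style
     * macro keyed on vendor + interface class/subclass/protocol; without a
     * row here the per-device entry above is unreachable. */
    static const struct usb_device_id xpad_table[] = {
            XPAD_XBOX360_VENDOR(0x0502),    /* Acer */
            XPAD_XBOX360_VENDOR(0x1949),    /* Amazon */
            { }
    };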

drivers/input/keyboard/atkbd.c

@@ -817,7 +817,7 @@ static int atkbd_probe(struct atkbd *atkbd)

 	if (atkbd_skip_getid(atkbd)) {
 		atkbd->id = 0xab83;
-		return 0;
+		goto deactivate_kbd;
 	}

/*
@@ -854,6 +854,7 @@ static int atkbd_probe(struct atkbd *atkbd)
 		return -1;
 	}

+deactivate_kbd:
 	/*
 	 * Make sure nothing is coming from the keyboard and disturbs our
 	 * internal state.

drivers/leds/led-class-multicolor.c

@@ -59,7 +59,8 @@ static ssize_t multi_intensity_store(struct device *dev,
 	for (i = 0; i < mcled_cdev->num_colors; i++)
 		mcled_cdev->subled_info[i].intensity = intensity_value[i];

-	led_set_brightness(led_cdev, led_cdev->brightness);
+	if (!test_bit(LED_BLINK_SW, &led_cdev->work_flags))
+		led_set_brightness(led_cdev, led_cdev->brightness);
 	ret = size;
 err_out:
 	mutex_unlock(&led_cdev->led_access);

drivers/mailbox/mailbox.c

@@ -453,8 +453,8 @@ void mbox_free_channel(struct mbox_chan *chan)
 	if (chan->txdone_method == TXDONE_BY_ACK)
 		chan->txdone_method = TXDONE_BY_POLL;
-
-	module_put(chan->mbox->dev->driver->owner);
 	spin_unlock_irqrestore(&chan->lock, flags);
+
+	module_put(chan->mbox->dev->driver->owner);
 }
 EXPORT_SYMBOL_GPL(mbox_free_channel);
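
For background: module_put() has no dependency on the channel state guarded by chan->lock, so holding a spinlock with interrupts disabled across it only widens the critical section for no benefit and couples the lock to whatever the module release path does. A generic sketch of keeping the locked region minimal (hypothetical struct chan, illustration only):

    #include <linux/spinlock.h>
    #include <linux/module.h>

    struct chan {                   /* hypothetical channel */
            spinlock_t lock;
            bool busy;              /* the only field 'lock' protects */
            struct module *owner;
    };

    static void chan_release(struct chan *ch)
    {
            unsigned long flags;

            spin_lock_irqsave(&ch->lock, flags);
            ch->busy = false;       /* state that actually needs the lock */
            spin_unlock_irqrestore(&ch->lock, flags);

            module_put(ch->owner);  /* unrelated to ch->lock: do it unlocked */
    }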

drivers/md/bcache/super.c

@@ -1765,7 +1765,12 @@ static void cache_set_flush(struct closure *cl)
 		mutex_unlock(&b->write_lock);
 	}

-	if (ca->alloc_thread)
+	/*
+	 * If the register_cache_set() call to bch_cache_set_alloc() failed,
+	 * ca has not been assigned a value and return error.
+	 * So we need check ca is not NULL during bch_cache_set_unregister().
+	 */
+	if (ca && ca->alloc_thread)
 		kthread_stop(ca->alloc_thread);

 	if (c->journal.cur) {

drivers/md/dm-raid.c

@@ -2381,7 +2381,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 	 */
 	sb_retrieve_failed_devices(sb, failed_devices);
 	rdev_for_each(r, mddev) {
-		if (test_bit(Journal, &rdev->flags) ||
+		if (test_bit(Journal, &r->flags) ||
 		    !r->sb_page)
 			continue;
 		sb2 = page_address(r->sb_page);
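
For background: the one-character dm-raid fix above is an iterator-shadowing bug; rdev is the function argument while r is the rdev_for_each() cursor, so testing rdev->flags type-checks perfectly while examining the same device on every pass. A compact userspace illustration of the bug class (hypothetical struct dev and flag):

    #include <stdio.h>

    struct dev { unsigned long flags; const char *name; };

    #define JOURNAL_FLAG 0x1UL      /* hypothetical flag bit */

    /* BUG: tests 'param' (fixed) instead of the cursor 'd' each iteration. */
    static void scan_buggy(struct dev *devs, int n, struct dev *param)
    {
            for (int i = 0; i < n; i++) {
                    struct dev *d = &devs[i];

                    if (param->flags & JOURNAL_FLAG)        /* should be d->flags */
                            continue;
                    printf("checking %s\n", d->name);
            }
    }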

drivers/md/md-bitmap.c

@@ -546,7 +546,7 @@ static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
 	 * is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
 	 */
 	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
-	if (write_behind > COUNTER_MAX)
+	if (write_behind > COUNTER_MAX / 2)
 		write_behind = COUNTER_MAX / 2;
 	sb->write_behind = cpu_to_le32(write_behind);
 	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
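
For background: the old check clamped only values above COUNTER_MAX while assigning COUNTER_MAX / 2, so requested values in the range (COUNTER_MAX / 2, COUNTER_MAX] slipped through unclamped; comparing against the value that is actually assigned closes that window. The same clamp written with the kernel's min_t() helper (an equivalent sketch, not what the patch uses):

    /* Bound and assignment can no longer drift apart, as they did when the
     * comparison used COUNTER_MAX but the assignment used COUNTER_MAX / 2. */
    write_behind = min_t(unsigned long, write_behind, COUNTER_MAX / 2);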

drivers/md/raid1.c

@@ -3290,6 +3290,7 @@ static int raid1_reshape(struct mddev *mddev)
 	/* ok, everything is stopped */
 	oldpool = conf->r1bio_pool;
 	conf->r1bio_pool = newpool;
+	init_waitqueue_head(&conf->r1bio_pool.wait);

 	for (d = d2 = 0; d < conf->raid_disks; d++) {
 		struct md_rdev *rdev = conf->mirrors[d].rdev;
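
For background: conf->r1bio_pool = newpool copies the mempool by value, and a mempool embeds a wait_queue_head_t whose list links still point into the on-stack newpool after the copy; once raid1_reshape() returns, a sleeping allocator would walk freed stack memory, which is why the wait queue is re-initialized at its new address. A userspace sketch of why self-referential structures cannot be copied by plain assignment (hypothetical types):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void init_list(struct list_head *h) { h->next = h->prev = h; }

    struct pool { struct list_head waiters; };  /* stand-in for mempool_t */

    int main(void)
    {
            struct pool on_stack, global;

            init_list(&on_stack.waiters);
            global = on_stack;      /* value copy: links still point at on_stack */

            printf("points into old storage: %d\n",
                   global.waiters.next == &on_stack.waiters);   /* prints 1 */

            init_list(&global.waiters);     /* the fix: re-init at the new address */
            printf("fixed: %d\n", global.waiters.next == &global.waiters);
            return 0;
    }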

drivers/media/platform/omap3isp/ispccdc.c

@@ -446,8 +446,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
 		if (ret < 0)
 			goto done;

-		dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
-				    req->table.sgt.nents, DMA_TO_DEVICE);
+		dma_sync_sgtable_for_cpu(isp->dev, &req->table.sgt,
+					 DMA_TO_DEVICE);

 		if (copy_from_user(req->table.addr, config->lsc,
 				   req->config.size)) {
@@ -455,8 +455,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
 			goto done;
 		}

-		dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
-				       req->table.sgt.nents, DMA_TO_DEVICE);
+		dma_sync_sgtable_for_device(isp->dev, &req->table.sgt,
+					    DMA_TO_DEVICE);
 	}

 	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);

drivers/media/platform/omap3isp/ispstat.c

@@ -161,8 +161,7 @@ static void isp_stat_buf_sync_for_device(struct ispstat *stat,
 	if (ISP_STAT_USES_DMAENGINE(stat))
 		return;

-	dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
-			       buf->sgt.nents, DMA_FROM_DEVICE);
+	dma_sync_sgtable_for_device(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE);
 }

 static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
@@ -171,8 +170,7 @@ static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
 	if (ISP_STAT_USES_DMAENGINE(stat))
 		return;

-	dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
-			    buf->sgt.nents, DMA_FROM_DEVICE);
+	dma_sync_sgtable_for_cpu(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE);
 }

 static void isp_stat_buf_clear(struct ispstat *stat)
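
For background: dma_sync_sg_for_cpu()/dma_sync_sg_for_device() must be passed the entry count that was originally handed to dma_map_sg() (orig_nents), while sgt.nents holds the possibly smaller count the mapping produced, so the old calls here could sync too few entries. The sgtable wrappers take the whole sg_table and pick the right field; they are essentially (simplified from the kernel's dma-mapping helpers):

    static inline void dma_sync_sgtable_for_cpu(struct device *dev,
                    struct sg_table *sgt, enum dma_data_direction dir)
    {
            /* orig_nents, not nents: the sync API wants what dma_map_sg() got */
            dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
    }

    static inline void dma_sync_sgtable_for_device(struct device *dev,
                    struct sg_table *sgt, enum dma_data_direction dir)
    {
            dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
    }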

drivers/media/usb/uvc/uvc_ctrl.c

@@ -1429,7 +1429,9 @@ static bool uvc_ctrl_xctrls_has_control(const struct v4l2_ext_control *xctrls,
 }

 static void uvc_ctrl_send_events(struct uvc_fh *handle,
-	const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
+	struct uvc_entity *entity,
+	const struct v4l2_ext_control *xctrls,
+	unsigned int xctrls_count)
 {
 	struct uvc_control_mapping *mapping;
 	struct uvc_control *ctrl;
@@ -1440,6 +1442,9 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
 		u32 changes = V4L2_EVENT_CTRL_CH_VALUE;

 		ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
+		if (ctrl->entity != entity)
+			continue;
+
 		if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
 			/* Notification will be sent from an Interrupt event. */
 			continue;
@@ -1560,14 +1565,19 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
 	return mutex_lock_interruptible(&chain->ctrl_mutex) ? -ERESTARTSYS : 0;
 }

+/*
+ * Returns the number of uvc controls that have been correctly set, or a
+ * negative number if there has been an error.
+ */
 static int uvc_ctrl_commit_entity(struct uvc_device *dev,
 				  struct uvc_fh *handle,
 				  struct uvc_entity *entity,
 				  int rollback)
 {
+	unsigned int processed_ctrls = 0;
 	struct uvc_control *ctrl;
 	unsigned int i;
-	int ret;
+	int ret = 0;

 	if (entity == NULL)
 		return 0;
@@ -1595,8 +1605,9 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
 				dev->intfnum, ctrl->info.selector,
 				uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
 				ctrl->info.size);
-		else
-			ret = 0;
+
+		if (!ret)
+			processed_ctrls++;

 		if (rollback || ret < 0)
 			memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
@@ -1605,15 +1616,23 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
 		ctrl->dirty = 0;

-		if (ret < 0)
-			return ret;
-		if (!rollback && handle &&
+		if (!rollback && handle && !ret &&
 		    ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
 			uvc_ctrl_set_handle(handle, ctrl, handle);
+
+		if (ret < 0 && !rollback) {
+			/*
+			 * If we fail to set a control, we need to rollback
+			 * the next ones.
+			 */
+			rollback = 1;
+		}
 	}

-	return 0;
+	if (ret)
+		return ret;
+
+	return processed_ctrls;
 }

 int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
@@ -1622,21 +1641,31 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
 {
 	struct uvc_video_chain *chain = handle->chain;
 	struct uvc_entity *entity;
-	int ret = 0;
+	int ret_out = 0;
+	int ret;

 	/* Find the control. */
 	list_for_each_entry(entity, &chain->entities, chain) {
 		ret = uvc_ctrl_commit_entity(chain->dev, handle, entity,
 					     rollback);
-		if (ret < 0)
-			goto done;
+		if (ret < 0) {
+			/*
+			 * When we fail to commit an entity, we need to
+			 * restore the UVC_CTRL_DATA_BACKUP for all the
+			 * controls in the other entities, otherwise our cache
+			 * and the hardware will be out of sync.
+			 */
+			rollback = 1;
+
+			ret_out = ret;
+		} else if (ret > 0 && !rollback) {
+			uvc_ctrl_send_events(handle, entity, xctrls,
+					     xctrls_count);
+		}
 	}

-	if (!rollback)
-		uvc_ctrl_send_events(handle, xctrls, xctrls_count);
-done:
 	mutex_unlock(&chain->ctrl_mutex);
-	return ret;
+	return ret_out;
 }

 int uvc_ctrl_get(struct uvc_video_chain *chain,
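
For background: uvc_ctrl_commit_entity() now reports how many controls it actually wrote (positive count), nothing to do (0), or an error (negative), which lets __uvc_ctrl_commit() keep walking the remaining entities in rollback mode after a failure and emit V4L2 control events only for entities that truly changed. The caller-side shape of that three-way contract, as a sketch (hypothetical commit_one() and send_events()):

    int ret = commit_one(entity);   /* hypothetical helper, same contract */

    if (ret < 0)
            rollback = 1;           /* error: restore the rest, remember errno */
    else if (ret > 0 && !rollback)
            send_events(entity);    /* ret controls were really applied */
    /* ret == 0: nothing dirty in this entity, nothing to report */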

drivers/mfd/max14577.c

@@ -467,6 +467,7 @@ static int max14577_i2c_remove(struct i2c_client *i2c)
 {
 	struct max14577 *max14577 = i2c_get_clientdata(i2c);

+	device_init_wakeup(max14577->dev, false);
 	mfd_remove_devices(max14577->dev);
 	regmap_del_irq_chip(max14577->irq, max14577->irq_data);
 	if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
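
For background: device_init_wakeup(dev, true) allocates and registers a wakeup source, so a driver that enables it in probe must disable it on remove or the wakeup source leaks on every unbind/rebind cycle, which is what the added line fixes. The general pairing, sketched with hypothetical example_probe()/example_remove():

    static int example_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
    {
            /* ... driver setup ... */
            device_init_wakeup(&i2c->dev, true);    /* allocates a wakeup source */
            return 0;
    }

    static int example_remove(struct i2c_client *i2c)
    {
            device_init_wakeup(&i2c->dev, false);   /* frees it again on unbind */
            /* ... driver teardown ... */
            return 0;
    }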

drivers/misc/vmw_vmci/vmci_host.c

@@ -227,6 +227,7 @@ static int drv_cp_harray_to_user(void __user *user_buf_uva,
 static int vmci_host_setup_notify(struct vmci_ctx *context,
 				  unsigned long uva)
 {
+	struct page *page;
 	int retval;

 	if (context->notify_page) {
@@ -243,11 +244,11 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
 	/*
 	 * Lock physical page backing a given user VA.
 	 */
-	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
-	if (retval != 1) {
-		context->notify_page = NULL;
+	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &page);
+	if (retval != 1)
 		return VMCI_ERROR_GENERIC;
-	}
+
+	context->notify_page = page;

 	/*
 	 * Map the locked page and set up notify pointer.
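
For background: pinning the page into a local variable and assigning context->notify_page only after get_user_pages_fast() succeeds means a racing reader, such as vmci_ctx_unset_notify() here, can only ever observe NULL or a fully valid pointer; the old code stored through the shared field first and then had to NULL it back out on failure, leaving a window. The publish-only-on-success shape, condensed (hypothetical ctx):

    struct page *page;

    /* Pin into a local first; nobody else can see 'page' yet. */
    if (get_user_pages_fast(uva, 1, FOLL_WRITE, &page) != 1)
            return VMCI_ERROR_GENERIC;      /* ctx->notify_page never touched */

    ctx->notify_page = page;        /* publish only the fully valid result */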

Some files were not shown because too many files have changed in this diff.