Merge tag 'ASB-2022-12-05_4.14-stable' of https://android.googlesource.com/kernel/common into android13-4.14-msmnile

https://source.android.com/docs/security/bulletin/2022-12-01
CVE-2022-23960

* tag 'ASB-2022-12-05_4.14-stable' of https://android.googlesource.com/kernel/common:
  Linux 4.14.301
  Revert "x86/speculation: Change FILL_RETURN_BUFFER to work with objtool"
  x86/nospec: Fix i386 RSB stuffing
  ipc/sem: Fix dangling sem_array access in semtimedop race
  v4l2: don't fall back to follow_pfn() if pin_user_pages_fast() fails
  proc: proc_skip_spaces() shouldn't think it is working on C strings
  proc: avoid integer type confusion in get_proc_long
  mmc: sdhci: Fix voltage switch delay
  mmc: sdhci: use FIELD_GET for preset value bit masks
  x86/ioremap: Fix page aligned size calculation in __ioremap_caller()
  Bluetooth: L2CAP: Fix accepting connection request for invalid SPSM
  x86/pm: Add enumeration check before spec MSRs save/restore setup
  x86/tsx: Add a feature bit for TSX control MSR support
  nvme: restrict management ioctls to admin
  tcp/udp: Fix memory leak in ipv6_renew_options().
  iommu/vt-d: Fix PCI device refcount leak in dmar_dev_scope_init()
  pinctrl: single: Fix potential division by zero
  ASoC: ops: Fix bounds check for _sx controls
  efi: random: Properly limit the size of the random seed
  arm64: errata: Fix KVM Spectre-v2 mitigation selection for Cortex-A57/A72
  arm64: Fix panic() when Spectre-v2 causes Spectre-BHB to re-allocate KVM vectors
  x86/bugs: Make sure MSR_SPEC_CTRL is updated properly upon resume from S3
  nilfs2: fix NULL pointer dereference in nilfs_palloc_commit_free_entry()
  tools/vm/slabinfo-gnuplot: use "grep -E" instead of "egrep"
  btrfs: qgroup: fix sleep from invalid context bug in btrfs_qgroup_inherit()
  perf: Add sample_flags to indicate the PMU-filled sample data
  hwmon: (coretemp) fix pci device refcount leak in nv1a_ram_new()
  hwmon: (coretemp) Check for null before removing sysfs attrs
  net: ethernet: renesas: ravb: Fix promiscuous mode after system resumed
  packet: do not set TP_STATUS_CSUM_VALID on CHECKSUM_COMPLETE
  net: hsr: Fix potential use-after-free
  dsa: lan9303: Correct stat name
  net/9p: Fix a potential socket leak in p9_socket_open
  net: net_netdev: Fix error handling in ntb_netdev_init_module()
  net: phy: fix null-ptr-deref while probe() failed
  qlcnic: fix sleep-in-atomic-context bugs caused by msleep
  can: cc770: cc770_isa_probe(): add missing free_cc770dev()
  can: sja1000_isa: sja1000_isa_probe(): add missing free_sja1000dev()
  net/mlx5: Fix uninitialized variable bug in outlen_write()
  of: property: decrement node refcount in of_fwnode_get_reference_args()
  hwmon: (ibmpex) Fix possible UAF when ibmpex_register_bmc() fails
  hwmon: (i5500_temp) fix missing pci_disable_device()
  iio: light: rpr0521: add missing Kconfig dependencies
  iio: health: afe4404: Fix oob read in afe4404_[read|write]_raw
  iio: health: afe4403: Fix oob read in afe4403_read_raw
  drm/amdgpu: always register an MMU notifier for userptr
  net: usb: qmi_wwan: add Telit 0x103a composition
  tcp: configurable source port perturb table size
  platform/x86: hp-wmi: Ignore Smart Experience App event
  platform/x86: acer-wmi: Enable SW_TABLET_MODE on Switch V 10 (SW5-017)
  platform/x86: asus-wmi: add missing pci_dev_put() in asus_wmi_set_xusb2pr()
  xen/platform-pci: add missing free_irq() in error path
  serial: 8250: 8250_omap: Avoid RS485 RTS glitch on ->set_termios()
  Input: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode
  nilfs2: fix nilfs_sufile_mark_dirty() not set segment usage as dirty
  kconfig: display recursive dependency resolution hint just once
  iio: core: Fix entry not deleted when iio_register_sw_trigger_type() fails
  iio: light: apds9960: fix wrong register for gesture gain
  arm64: dts: rockchip: lower rk3399-puma-haikou SD controller clock frequency
  nios2: add FORCE for vmlinuz.gz
  s390/crashdump: fix TOD programmable field size
  net: thunderx: Fix the ACPI memory leak
  nfc: st-nci: fix memory leaks in EVT_TRANSACTION
  nfc: st-nci: fix incorrect validating logic in EVT_TRANSACTION
  s390/dasd: fix no record found for raw_track_access
  dccp/tcp: Reset saddr on failure after inet6?_hash_connect().
  NFC: nci: fix memory leak in nci_rx_data_packet()
  xfrm: Fix ignored return value in xfrm6_init()
  net/qla3xxx: fix potential memleak in ql3xxx_send()
  net/mlx4: Check retval of mlx4_bitmap_init
  ARM: mxs: fix memory leak in mxs_machine_init()
  9p/fd: fix issue of list_del corruption in p9_fd_cancel()
  net: pch_gbe: fix potential memleak in pch_gbe_tx_queue()
  nfc/nci: fix race with opening and closing
  ARM: dts: at91: sam9g20ek: enable udc vbus gpio pinctrl
  bus: sunxi-rsb: Support atomic transfers
  ARM: dts: am335x-pcm-953: Define fixed regulators in root node
  af_key: Fix send_acquire race with pfkey_register
  MIPS: pic32: treat port as signed integer
  spi: stm32: fix stm32_spi_prepare_mbr() that halves spi clk for every run
  wifi: mac80211: Fix ack frame idr leak when mesh has no route
  audit: fix undefined behavior in bit shift for AUDIT_BIT
  wifi: mac80211_hwsim: fix debugfs attribute ps with rc table support
  Linux 4.14.300
  ntfs: check overflow when iterating ATTR_RECORDs
  ntfs: fix out-of-bounds read in ntfs_attr_find()
  ntfs: fix use-after-free in ntfs_attr_find()
  mm: fs: initialize fsdata passed to write_begin/write_end interface
  9p/trans_fd: always use O_NONBLOCK read/write
  gfs2: Switch from strlcpy to strscpy
  gfs2: Check sb_bsize_shift after reading superblock
  9p: trans_fd/p9_conn_cancel: drop client lock earlier
  kcm: close race conditions on sk_receive_queue
  bpf, test_run: Fix alignment problem in bpf_prog_test_run_skb()
  kcm: avoid potential race in kcm_tx_work
  tcp: cdg: allow tcp_cdg_release() to be called multiple times
  macvlan: enforce a consistent minimal mtu
  serial: 8250: Flush DMA Rx on RLSI
  nilfs2: fix use-after-free bug of ns_writer on remount
  misc/vmw_vmci: fix an infoleak in vmci_host_do_receive_datagram()
  mmc: sdhci-pci: Fix possible memory leak caused by missing pci_dev_put()
  mmc: core: properly select voltage range without power cycle
  serial: 8250_lpss: Configure DMA also w/o DMA filter
  serial: 8250: Fall back to non-DMA Rx if IIR_RDI occurs
  dm ioctl: fix misbehavior if list_versions races with module loading
  iio: pressure: ms5611: changed hardcoded SPI speed to value limited
  iio: trigger: sysfs: fix possible memory leak in iio_sysfs_trig_init()
  iio: adc: at91_adc: fix possible memory leak in at91_adc_allocate_trigger()
  usb: chipidea: fix deadlock in ci_otg_del_timer
  usb: add NO_LPM quirk for Realforce 87U Keyboard
  USB: serial: option: add Fibocom FM160 0x0111 composition
  USB: serial: option: add u-blox LARA-L6 modem
  USB: serial: option: add u-blox LARA-R6 00B modem
  USB: serial: option: remove old LARA-R6 PID
  USB: serial: option: add Sierra Wireless EM9191
  ALSA: usb-audio: Drop snd_BUG_ON() from snd_usbmidi_output_open()
  ring_buffer: Do not deactivate non-existant pages
  ftrace: Fix null pointer dereference in ftrace_add_mod()
  ftrace: Optimize the allocation for mcount entries
  ftrace: Fix the possible incorrect kernel message
  cifs: Fix wrong return value checking when GETFLAGS
  net/x25: Fix skb leak in x25_lapb_receive_frame()
  drbd: use after free in drbd_create_device()
  xen/pcpu: fix possible memory leak in register_pcpu()
  net: caif: fix double disconnect client in chnl_net_open()
  mISDN: fix misuse of put_device() in mISDN_register_device()
  mISDN: fix possible memory leak in mISDN_dsp_element_register()
  net: bgmac: Drop free_netdev() from bgmac_enet_remove()
  pinctrl: devicetree: fix null pointer dereferencing in pinctrl_dt_to_map
  parport_pc: Avoid FIFO port location truncation
  block: sed-opal: kmalloc the cmd/resp buffers
  ASoC: soc-utils: Remove __exit for snd_soc_util_exit()
  tty: n_gsm: fix sleep-in-atomic-context bug in gsm_control_send
  serial: 8250: omap: Flush PM QOS work on remove
  serial: 8250_omap: remove wait loop from Errata i202 workaround
  ASoC: core: Fix use-after-free in snd_soc_exit()
  Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm
  drm/imx: imx-tve: Fix return type of imx_tve_connector_mode_valid
  selftests/futex: fix build for clang
  x86/cpu: Restore AMD's DE_CFG MSR after resume
  dmaengine: at_hdmac: Check return code of dma_async_device_register
  dmaengine: at_hdmac: Fix impossible condition
  dmaengine: at_hdmac: Don't allow CPU to reorder channel enable
  dmaengine: at_hdmac: Fix completion of unissued descriptor in case of errors
  dmaengine: at_hdmac: Don't start transactions at tx_submit level
  dmaengine: at_hdmac: Fix at_lli struct definition
  cert host tools: Stop complaining about deprecated OpenSSL functions
  udf: Fix a slab-out-of-bounds write bug in udf_find_entry()
  btrfs: selftests: fix wrong error check in btrfs_free_dummy_root()
  platform/x86: hp_wmi: Fix rfkill causing soft blocked wifi
  drm/i915/dmabuf: fix sg_table handling in map_dma_buf
  nilfs2: fix deadlock in nilfs_count_free_blocks()
  ALSA: usb-audio: Add quirk entry for M-Audio Micro
  ALSA: hda: fix potential memleak in 'add_widget_node'
  arm64: efi: Fix handling of misaligned runtime regions and drop warning
  net: macvlan: fix memory leaks of macvlan_common_newlink
  net: mv643xx_eth: disable napi when init rxq or txq failed in mv643xx_eth_open()
  ethernet: s2io: disable napi when start nic failed in s2io_card_up()
  net: cxgb3_main: disable napi when bind qsets failed in cxgb_up()
  drivers: net: xgene: disable napi when register irq failed in xgene_enet_open()
  dmaengine: mv_xor_v2: Fix a resource leak in mv_xor_v2_remove()
  tipc: fix the msg->req tlv len check in tipc_nl_compat_name_table_dump_header
  ipv6: addrlabel: fix infoleak when sending struct ifaddrlblmsg to network
  drm/vc4: Fix missing platform_unregister_drivers() call in vc4_drm_register()
  hamradio: fix issue of dev reference count leakage in bpq_device_event()
  net: lapbether: fix issue of dev reference count leakage in lapbeth_device_event()
  capabilities: fix undefined behavior in bit shift for CAP_TO_MASK
  net: fman: Unregister ethernet device on removal
  bnxt_en: fix potentially incorrect return value for ndo_rx_flow_steer
  net: gso: fix panic on frag_list with mixed head alloc types
  HID: hyperv: fix possible memory leak in mousevsc_probe()
  Linux 4.14.299
  wifi: brcmfmac: Fix potential buffer overflow in brcmf_fweh_event_worker()
  linux/bits.h: make BIT(), GENMASK(), and friends available in assembly
  linux/const.h: move UL() macro to include/linux/const.h
  linux/const.h: prefix include guard of uapi/linux/const.h with _UAPI
  KVM: x86: emulator: update the emulation mode after CR0 write
  KVM: x86: emulator: introduce emulator_recalc_and_set_mode
  KVM: x86: emulator: em_sysexit should update ctxt->mode
  KVM: x86: Mask off reserved bits in CPUID.80000008H
  ext4: fix warning in 'ext4_da_release_space'
  parisc: Export iosapic_serial_irq() symbol for serial port driver
  parisc: Make 8250_gsc driver dependend on CONFIG_PARISC
  efi: random: reduce seed size to 32 bytes
  ALSA: usb-audio: Add quirks for MacroSilicon MS2100/MS2106 devices
  capabilities: fix potential memleak on error path from vfs_getxattr_alloc()
  tcp/udp: Make early_demux back namespacified.
  btrfs: fix type of parameter generation in btrfs_get_dentry
  block, bfq: protect 'bfqd->queued' by 'bfqd->lock'
  Bluetooth: L2CAP: Fix attempting to access uninitialized memory
  i2c: xiic: Add platform module alias
  media: dvb-frontends/drxk: initialize err to 0
  media: s5p_cec: limit msg.len to CEC_MAX_MSG_SIZE
  net, neigh: Fix null-ptr-deref in neigh_table_clear()
  net: mdio: fix undefined behavior in bit shift for __mdiobus_register
  Bluetooth: L2CAP: fix use-after-free in l2cap_conn_del()
  Bluetooth: L2CAP: Fix use-after-free caused by l2cap_reassemble_sdu
  btrfs: fix ulist leaks in error paths of qgroup self tests
  btrfs: fix inode list leak during backref walking at resolve_indirect_refs()
  isdn: mISDN: netjet: fix wrong check of device registration
  mISDN: fix possible memory leak in mISDN_register_device()
  rose: Fix NULL pointer dereference in rose_send_frame()
  ipvs: use explicitly signed chars
  net: sched: Fix use after free in red_enqueue()
  ata: pata_legacy: fix pdc20230_set_piomode()
  net: fec: fix improper use of NETDEV_TX_BUSY
  nfc: nfcmrvl: Fix potential memory leak in nfcmrvl_i2c_nci_send()
  nfc: s3fwrn5: Fix potential memory leak in s3fwrn5_nci_send()
  net: dsa: Fix possible memory leaks in dsa_loop_init()
  nfs4: Fix kmemleak when allocate slot failed
  NFSv4.1: We must always send RECLAIM_COMPLETE after a reboot
  NFSv4.1: Handle RECLAIM_COMPLETE trunking errors
  UPSTREAM: linux/const.h: move UL() macro to include/linux/const.h
  UPSTREAM: linux/const.h: prefix include guard of uapi/linux/const.h with _UAPI
  UPSTREAM: linux/bits.h: make BIT(), GENMASK(), and friends available in assembly
  Linux 4.14.298
  can: rcar_canfd: rcar_canfd_handle_global_receive(): fix IRQ storm on global FIFO receive
  net: ehea: fix possible memory leak in ehea_register_port()
  openvswitch: switch from WARN to pr_warn
  ALSA: aoa: Fix I2S device accounting
  ALSA: aoa: i2sbus: fix possible memory leak in i2sbus_add_dev()
  PM: domains: Fix handling of unavailable/disabled idle states
  net: ksz884x: fix missing pci_disable_device() on error in pcidev_init()
  i40e: Fix flow-type by setting GL_HASH_INSET registers
  i40e: Fix ethtool rx-flow-hash setting for X722
  media: videodev2.h: V4L2_DV_BT_BLANKING_HEIGHT should check 'interlaced'
  media: v4l2-dv-timings: add sanity checks for blanking values
  media: vivid: dev->bitmap_cap wasn't freed in all cases
  media: vivid: s_fbuf: add more sanity checks
  PM: hibernate: Allow hybrid sleep to work with s2idle
  can: mscan: mpc5xxx: mpc5xxx_can_probe(): add missing put_clock() in error path
  tcp: fix indefinite deferral of RTO with SACK reneging
  net: lantiq_etop: don't free skb when returning NETDEV_TX_BUSY
  kcm: annotate data-races around kcm->rx_wait
  kcm: annotate data-races around kcm->rx_psock
  amd-xgbe: add the bit rate quirk for Molex cables
  amd-xgbe: fix the SFP compliance codes check for DAC cables
  x86/unwind/orc: Fix unreliable stack dump with gcov
  ALSA: ac97: fix possible memory leak in snd_ac97_dev_register()
  arc: iounmap() arg is volatile
  drm/msm: Fix return type of mdp4_lvds_connector_mode_valid
  net: ieee802154: fix error return code in dgram_bind()
  mm,hugetlb: take hugetlb_lock before decrementing h->resv_huge_pages
  xen/gntdev: Prevent leaking grants
  Xen/gntdev: don't ignore kernel unmapping error
  s390/futex: add missing EX_TABLE entry to __futex_atomic_op()
  kernfs: fix use-after-free in __kernfs_remove
  mmc: core: Fix kernel panic when remove non-standard SDIO card
  drm/msm/hdmi: fix memory corruption with too many bridges
  mac802154: Fix LQI recording
  fbdev: smscufx: Fix several use-after-free bugs
  iio: light: tsl2583: Fix module unloading
  tools: iio: iio_utils: fix digit calculation
  xhci: Remove device endpoints from bandwidth list when freeing the device
  usb: xhci: add XHCI_SPURIOUS_SUCCESS to ASM1042 despite being a V0.96 controller
  usb: bdc: change state when port disconnected
  usb: dwc3: gadget: Don't set IMI for no_interrupt
  USB: add RESET_RESUME quirk for NVIDIA Jetson devices in RCM
  ALSA: au88x0: use explicitly signed char
  ALSA: Use del_timer_sync() before freeing timer
  ACPI: video: Force backlight native for more TongFang devices
  media: v4l2-mem2mem: Apply DST_QUEUE_OFF_BASE on MMAP buffers across ioctls
  iommu/vt-d: Clean up si_domain in the init_dmars() error path
  net: hns: fix possible memory leak in hnae_ae_register()
  net/atm: fix proc_mpc_write incorrect return value
  HID: magicmouse: Do not set BTN_MOUSE on double report
  ACPI: extlog: Handle multiple records
  btrfs: fix processing of delayed data refs during backref walking
  r8152: add PID for the Lenovo OneLink+ Dock
  arm64: errata: Remove AES hwcap for COMPAT tasks
  KVM: arm64: vgic: Fix exit condition in scan_its_table()
  ata: ahci: Match EM_MAX_SLOTS with SATA_PMP_MAX_PORTS
  ata: ahci-imx: Fix MODULE_ALIAS
  x86/microcode/AMD: Apply the patch early on every logical thread
  ocfs2: fix BUG when iput after ocfs2_mknod fails
  ocfs2: clear dinode links count in case of error
  Linux 4.14.297
  x86/speculation: Add RSB VM Exit protections
  x86/bugs: Warn when "ibrs" mitigation is selected on Enhanced IBRS parts
  x86/speculation: Use DECLARE_PER_CPU for x86_spec_ctrl_current
  x86/speculation: Disable RRSBA behavior
  x86/bugs: Add Cannon lake to RETBleed affected CPU list
  x86/cpu/amd: Enumerate BTC_NO
  x86/common: Stamp out the stepping madness
  x86/speculation: Fill RSB on vmexit for IBRS
  KVM: VMX: Fix IBRS handling after vmexit
  KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
  x86/speculation: Remove x86_spec_ctrl_mask
  x86/speculation: Use cached host SPEC_CTRL value for guest entry/exit
  x86/speculation: Fix SPEC_CTRL write on SMT state change
  x86/speculation: Fix firmware entry SPEC_CTRL handling
  x86/speculation: Fix RSB filling with CONFIG_RETPOLINE=n
  x86/speculation: Add LFENCE to RSB fill sequence
  x86/speculation: Change FILL_RETURN_BUFFER to work with objtool
  entel_idle: Disable IBRS during long idle
  x86/bugs: Report Intel retbleed vulnerability
  x86/bugs: Split spectre_v2_select_mitigation() and spectre_v2_user_select_mitigation()
  x86/speculation: Add spectre_v2=ibrs option to support Kernel IBRS
  x86/bugs: Optimize SPEC_CTRL MSR writes
  x86/entry: Add kernel IBRS implementation
  x86/bugs: Keep a per-CPU IA32_SPEC_CTRL value
  x86/bugs: Add AMD retbleed= boot parameter
  x86/bugs: Report AMD retbleed vulnerability
  x86/cpufeatures: Move RETPOLINE flags to word 11
  x86/entry: Remove skip_r11rcx
  x86/cpu: Add a steppings field to struct x86_cpu_id
  x86/cpu: Add consistent CPU match macros
  x86/devicetable: Move x86 specific macro out of generic code
  x86/cpufeature: Fix various quality problems in the <asm/cpu_device_hd.h> header
  x86/cpufeature: Add facility to check for min microcode revisions
  Revert "x86/cpu: Add a steppings field to struct x86_cpu_id"

 Conflicts:
	arch/arm64/Kconfig
	arch/arm64/include/asm/cpucaps.h
	drivers/mmc/host/sdhci.c
	scripts/kconfig/symbol.c

Change-Id: I19dc4fe4d3f8ced66f3fd61dd25de82e19d1e3a1
Michael Bestas, 2023-01-08 11:50:57 +02:00
278 changed files with 2612 additions and 1054 deletions


@@ -422,6 +422,14 @@ The possible values in this file are:
'RSB filling' Protection of RSB on context switch enabled
============= ===========================================
- EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:
=========================== =======================================================
'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled
'PBRSB-eIBRS: Vulnerable' CPU is vulnerable
'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
=========================== =======================================================
Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
report vulnerability.
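
The PBRSB status strings documented above are appended to the existing spectre_v2 entry under /sys/devices/system/cpu/vulnerabilities/. As a quick illustration (not part of this commit), a minimal user-space C sketch that prints the combined status string:

/* Minimal sketch (not from this commit): print the spectre_v2 status string,
 * which carries the "PBRSB-eIBRS: ..." suffix documented above. */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* e.g. "..., RSB filling, PBRSB-eIBRS: SW sequence" */
	fclose(f);
	return 0;
}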


@@ -4007,6 +4007,18 @@
retain_initrd [RAM] Keep initrd memory after extraction
retbleed= [X86] Control mitigation of RETBleed (Arbitrary
Speculative Code Execution with Return Instructions)
vulnerability.
off - unconditionally disable
auto - automatically select a mitigation
Selecting 'auto' will choose a mitigation method at run
time according to the CPU.
Not specifying this option is equivalent to retbleed=auto.
rfkill.default_state=
0 "airplane mode". All wifi, bluetooth, wimax, gps, fm,
etc. communication is blocked by default.
@@ -4246,6 +4258,7 @@
eibrs - enhanced IBRS
eibrs,retpoline - enhanced IBRS + Retpolines
eibrs,lfence - enhanced IBRS + LFENCE
ibrs - use IBRS to protect kernel
Not specifying this option is equivalent to
spectre_v2=auto.


@@ -53,7 +53,9 @@ stable kernels.
| ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
| ARM | Cortex-A57 | #852523 | N/A |
| ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
| ARM | Cortex-A57 | #1742098 | ARM64_ERRATUM_1742098 |
| ARM | Cortex-A72 | #853709 | N/A |
| ARM | Cortex-A72 | #1655431 | ARM64_ERRATUM_1742098 |
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 296
SUBLEVEL = 301
EXTRAVERSION =
NAME = Petit Gorille


@@ -35,7 +35,7 @@ static inline void ioport_unmap(void __iomem *addr)
{
}
extern void iounmap(const void __iomem *addr);
extern void iounmap(const volatile void __iomem *addr);
#define ioremap_nocache(phy, sz) ioremap(phy, sz)
#define ioremap_wc(phy, sz) ioremap(phy, sz)


@@ -95,7 +95,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
EXPORT_SYMBOL(ioremap_prot);
void iounmap(const void __iomem *addr)
void iounmap(const volatile void __iomem *addr)
{
/* weird double cast to handle phys_addr_t > 32 bits */
if (arc_uncached_addr_space((phys_addr_t)(u32)addr))


@@ -15,22 +15,20 @@
compatible = "phytec,am335x-pcm-953", "phytec,am335x-phycore-som", "ti,am33xx";
/* Power */
regulators {
vcc3v3: fixedregulator@1 {
compatible = "regulator-fixed";
regulator-name = "vcc3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
};
vcc3v3: fixedregulator1 {
compatible = "regulator-fixed";
regulator-name = "vcc3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
};
vcc1v8: fixedregulator@2 {
compatible = "regulator-fixed";
regulator-name = "vcc1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
};
vcc1v8: fixedregulator2 {
compatible = "regulator-fixed";
regulator-name = "vcc1v8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
};
/* User IO */


@@ -39,6 +39,13 @@
};
usb1 {
pinctrl_usb1_vbus_gpio: usb1_vbus_gpio {
atmel,pins =
<AT91_PIOC 5 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PC5 GPIO */
};
};
mmc0_slot1 {
pinctrl_board_mmc0_slot1: mmc0_slot1-board {
atmel,pins =
@@ -72,6 +79,8 @@
};
usb1: gadget@fffa4000 {
pinctrl-0 = <&pinctrl_usb1_vbus_gpio>;
pinctrl-names = "default";
atmel,vbus-gpio = <&pioC 5 GPIO_ACTIVE_HIGH>;
status = "okay";
};


@@ -22,12 +22,6 @@
#include <mach/memory.h>
#endif
/*
* Allow for constants defined here to be used from assembly code
* by prepending the UL suffix only with actual C code compilation.
*/
#define UL(x) _AC(x, UL)
/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)


@@ -393,8 +393,10 @@ static void __init mxs_machine_init(void)
root = of_find_node_by_path("/");
ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
if (ret)
if (ret) {
kfree(soc_dev_attr);
return;
}
soc_dev_attr->family = "Freescale MXS Family";
soc_dev_attr->soc_id = mxs_get_soc_id();


@@ -180,7 +180,7 @@
cap-sd-highspeed;
cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
disable-wp;
max-frequency = <150000000>;
max-frequency = <40000000>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
vmmc-supply = <&vcc3v3_baseboard>;


@@ -48,7 +48,8 @@
#define ARM64_HW_DBM 28
#define ARM64_WORKAROUND_1188873 29
#define ARM64_SPECTRE_BHB 30
#define ARM64_WORKAROUND_1742098 31
#define ARM64_NCAPS 31
#define ARM64_NCAPS 32
#endif /* __ASM_CPUCAPS_H */


@@ -28,12 +28,6 @@
#include <asm/page-def.h>
#include <asm/sizes.h>
/*
* Allow for constants defined here to be used from assembly code
* by prepending the UL suffix only with actual C code compilation.
*/
#define UL(x) _AC(x, UL)
/*
* Size of the PCI I/O space. This must remain a power of two so that
* IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.


@@ -135,9 +135,12 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
}
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.fn, fn);
__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
if (fn != __this_cpu_read(bp_hardening_data.fn)) {
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.fn, fn);
__this_cpu_write(bp_hardening_data.template_start,
hyp_vecs_start);
}
spin_unlock(&bp_lock);
}
#else
@@ -570,6 +573,14 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
return (need_wa > 0);
}
#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
{},
};
#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -774,6 +785,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = is_spectre_bhb_affected,
.cpu_enable = spectre_bhb_enable_mitigation,
},
#ifdef CONFIG_ARM64_ERRATUM_1742098
{
.desc = "ARM erratum 1742098",
.capability = ARM64_WORKAROUND_1742098,
CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
},
#endif
{
}
};
@@ -1078,8 +1097,11 @@ static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
}
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) {
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.template_start,
hyp_vecs_start);
}
spin_unlock(&bp_lock);
}
#else
@@ -1115,7 +1137,13 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
case 8:
kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
/*
* A57/A72-r0 will already have selected the
* spectre-indirect vector, which is sufficient
* for BHB too.
*/
if (!__this_cpu_read(bp_hardening_data.fn))
kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
break;
case 24:
kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);


@@ -30,6 +30,7 @@
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/hwcap.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
@@ -1089,6 +1090,14 @@ static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
}
#endif /* CONFIG_ARM64_SSBD */
static void elf_hwcap_fixup(void)
{
#ifdef CONFIG_ARM64_ERRATUM_1742098
if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
#endif /* ARM64_ERRATUM_1742098 */
}
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1687,8 +1696,10 @@ void __init setup_cpu_features(void)
mark_const_caps_ready();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0())
if (system_supports_32bit_el0()) {
setup_elf_hwcaps(compat_elf_hwcaps);
elf_hwcap_fixup();
}
/* Advertise that we have computed the system capabilities */
set_sys_caps_initialised();


@@ -16,6 +16,14 @@
#include <asm/efi.h>
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
if (PAGE_SIZE == EFI_PAGE_SIZE)
return false;
return !PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}
/*
* Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
* executable, everything else can be mapped with the XN bits
@@ -29,14 +37,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
if (type == EFI_MEMORY_MAPPED_IO)
return PROT_DEVICE_nGnRE;
if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
"UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
if (region_is_misaligned(md)) {
static bool __initdata code_is_misaligned;
/*
* If the region is not aligned to the page size of the OS, we
* can not use strict permissions, since that would also affect
* the mapping attributes of the adjacent regions.
* Regions that are not aligned to the OS page size cannot be
* mapped with strict permissions, as those might interfere
* with the permissions that are needed by the adjacent
* region's mapping. However, if we haven't encountered any
* misaligned runtime code regions so far, we can safely use
* non-executable permissions for non-code regions.
*/
return pgprot_val(PAGE_KERNEL_EXEC);
code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);
return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
: pgprot_val(PAGE_KERNEL);
}
/* R-- */
if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@@ -64,19 +80,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_RUNTIME_SERVICES_DATA);
if (!PAGE_ALIGNED(md->phys_addr) ||
!PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
/*
* If the end address of this region is not aligned to page
* size, the mapping is rounded up, and may end up sharing a
* page frame with the next UEFI memory region. If we create
* a block entry now, we may need to split it again when mapping
* the next region, and support for that is going to be removed
* from the MMU routines. So avoid block mappings altogether in
* that case.
*/
/*
* If this region is not aligned to the page size used by the OS, the
* mapping will be rounded outwards, and may end up sharing a page
* frame with an adjacent runtime memory region. Given that the page
* table descriptor covering the shared page will be rewritten when the
* adjacent region gets mapped, we must avoid block mappings here so we
* don't have to worry about splitting them when that happens.
*/
if (region_is_misaligned(md))
page_mappings_only = true;
}
create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
md->num_pages << EFI_PAGE_SHIFT,
@@ -104,6 +117,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
md->type != EFI_RUNTIME_SERVICES_DATA);
if (region_is_misaligned(md))
return 0;
/*
* Calling apply_to_page_range() is only safe on regions that are
* guaranteed to be mapped down to pages. Since we are only called

View File

@@ -26,6 +26,6 @@ extern char *fw_getcmdline(void);
extern void fw_meminit(void);
extern char *fw_getenv(char *name);
extern unsigned long fw_getenvl(char *name);
extern void fw_init_early_console(char port);
extern void fw_init_early_console(void);
#endif /* __ASM_FW_H_ */


@@ -34,7 +34,7 @@
#define U_BRG(x) (UART_BASE(x) + 0x40)
static void __iomem *uart_base;
static char console_port = -1;
static int console_port = -1;
static int __init configure_uart_pins(int port)
{
@@ -54,7 +54,7 @@ static int __init configure_uart_pins(int port)
return 0;
}
static void __init configure_uart(char port, int baud)
static void __init configure_uart(int port, int baud)
{
u32 pbclk;
@@ -67,7 +67,7 @@ static void __init configure_uart(char port, int baud)
uart_base + PIC32_SET(U_STA(port)));
}
static void __init setup_early_console(char port, int baud)
static void __init setup_early_console(int port, int baud)
{
if (configure_uart_pins(port))
return;
@@ -137,16 +137,15 @@ _out:
return baud;
}
void __init fw_init_early_console(char port)
void __init fw_init_early_console(void)
{
char *arch_cmdline = pic32_getcmdline();
int baud = -1;
int baud, port;
uart_base = ioremap_nocache(PIC32_BASE_UART, 0xc00);
baud = get_baud_from_cmdline(arch_cmdline);
if (port == -1)
port = get_port_from_cmdline(arch_cmdline);
port = get_port_from_cmdline(arch_cmdline);
if (port == -1)
port = EARLY_CONSOLE_PORT;


@@ -68,7 +68,7 @@ void __init plat_mem_setup(void)
strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
#ifdef CONFIG_EARLY_PRINTK
fw_init_early_console(-1);
fw_init_early_console();
#endif
pic32_config_init();
}


@@ -20,7 +20,7 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
$(obj)/vmImage: $(obj)/vmlinux.gz
$(obj)/vmImage: $(obj)/vmlinux.gz FORCE
$(call if_changed,uimage)
@$(kecho) 'Kernel: $@ is ready'


@@ -16,7 +16,8 @@
"3: jl 1b\n" \
" lhi %0,0\n" \
"4: sacf 768\n" \
EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
EX_TABLE(0b,4b) EX_TABLE(1b,4b) \
EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
"=m" (*uaddr) \
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \

View File

@@ -45,7 +45,7 @@ struct save_area {
u64 fprs[16];
u32 fpc;
u32 prefix;
u64 todpreg;
u32 todpreg;
u64 timer;
u64 todcmp;
u64 vxrs_low[16];


@@ -19,12 +19,6 @@
#include <asm/sizes.h>
#include <mach/memory.h>
/*
* Allow for constants defined here to be used from assembly code
* by prepending the UL suffix only with actual C code compilation.
*/
#define UL(x) _AC(x, UL)
/*
* PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.


@@ -6,6 +6,8 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>
/*
@@ -146,27 +148,19 @@ For 32-bit we have the following conventions - kernel is built with
.endm
.macro POP_REGS pop_rdi=1 skip_r11rcx=0
.macro POP_REGS pop_rdi=1
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
.if \skip_r11rcx
popq %rsi
.else
popq %r11
.endif
popq %r10
popq %r9
popq %r8
popq %rax
.if \skip_r11rcx
popq %rsi
.else
popq %rcx
.endif
popq %rdx
popq %rsi
.if \pop_rdi
@@ -336,6 +330,62 @@ For 32-bit we have the following conventions - kernel is built with
#endif
/*
* IBRS kernel mitigation for Spectre_v2.
*
* Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
* the regs it uses (AX, CX, DX). Must be called before the first RET
* instruction (NOTE! UNTRAIN_RET includes a RET instruction)
*
* The optional argument is used to save/restore the current value,
* which is used on the paranoid paths.
*
* Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
*/
.macro IBRS_ENTER save_reg
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx
.ifnb \save_reg
rdmsr
shl $32, %rdx
or %rdx, %rax
mov %rax, \save_reg
test $SPEC_CTRL_IBRS, %eax
jz .Ldo_wrmsr_\@
lfence
jmp .Lend_\@
.Ldo_wrmsr_\@:
.endif
movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
movl %edx, %eax
shr $32, %rdx
wrmsr
.Lend_\@:
.endm
/*
* Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
* regs. Must be called after the last RET.
*/
.macro IBRS_EXIT save_reg
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx
.ifnb \save_reg
mov \save_reg, %rdx
.else
movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
andl $(~SPEC_CTRL_IBRS), %edx
.endif
movl %edx, %eax
shr $32, %rdx
wrmsr
.Lend_\@:
.endm
/*
* Mitigate Spectre v1 for conditional swapgs code paths.
*


@@ -245,7 +245,6 @@ ENTRY(__switch_to_asm)
movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif
#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
@@ -254,7 +253,6 @@ ENTRY(__switch_to_asm)
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
/* restore callee-saved registers */
popfl


@@ -230,6 +230,10 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
/* IRQs are off. */
movq %rsp, %rdi
/* clobbers %rax, make sure it is after saving the syscall nr */
IBRS_ENTER
call do_syscall_64 /* returns with IRQs disabled */
TRACE_IRQS_IRETQ /* we're about to change IF */
@@ -301,8 +305,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
* perf profiles. Nothing jumps here.
*/
syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
POP_REGS pop_rdi=0 skip_r11rcx=1
IBRS_EXIT
POP_REGS pop_rdi=0
/*
* Now all regs are restored except RSP and RDI.
@@ -353,7 +357,6 @@ ENTRY(__switch_to_asm)
movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif
#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
@@ -362,7 +365,6 @@ ENTRY(__switch_to_asm)
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
/* restore callee-saved registers */
popfq
@@ -591,6 +593,7 @@ GLOBAL(retint_user)
TRACE_IRQS_IRETQ
GLOBAL(swapgs_restore_regs_and_return_to_usermode)
IBRS_EXIT
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates user mode. */
testb $3, CS(%rsp)
@@ -1134,6 +1137,9 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
* Save all registers in pt_regs, and switch gs if needed.
* Use slow, but surefire "are we in kernel?" check.
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*
* R14 - old CR3
* R15 - old SPEC_CTRL
*/
ENTRY(paranoid_entry)
UNWIND_HINT_FUNC
@@ -1157,6 +1163,12 @@ ENTRY(paranoid_entry)
*/
FENCE_SWAPGS_KERNEL_ENTRY
/*
* Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like
* CR3 above, keep the old value in a callee saved register.
*/
IBRS_ENTER save_reg=%r15
ret
END(paranoid_entry)
@@ -1171,9 +1183,19 @@ END(paranoid_entry)
* to try to handle preemption here.
*
* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
*
* R14 - old CR3
* R15 - old SPEC_CTRL
*/
ENTRY(paranoid_exit)
UNWIND_HINT_REGS
/*
* Must restore IBRS state before both CR3 and %GS since we need access
* to the per-CPU x86_spec_ctrl_shadow variable.
*/
IBRS_EXIT save_reg=%r15
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
testl %ebx, %ebx /* swapgs needed? */
@@ -1208,8 +1230,10 @@ ENTRY(error_entry)
FENCE_SWAPGS_USER_ENTRY
/* We have user CR3. Change to kernel CR3. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
IBRS_ENTER
.Lerror_entry_from_usermode_after_swapgs:
/* Put us onto the real thread stack. */
popq %r12 /* save return addr in %12 */
movq %rsp, %rdi /* arg0 = pt_regs pointer */
@@ -1272,6 +1296,7 @@ ENTRY(error_entry)
SWAPGS
FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
IBRS_ENTER
/*
* Pretend that the exception came from user mode: set up pt_regs
@@ -1377,6 +1402,8 @@ ENTRY(nmi)
PUSH_AND_CLEAR_REGS rdx=(%rdx)
ENCODE_FRAME_POINTER
IBRS_ENTER
/*
* At this point we no longer need to worry about stack damage
* due to nesting -- we're on the normal thread stack and we're
@@ -1600,6 +1627,9 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi
/* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
IBRS_EXIT save_reg=%r15
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
testl %ebx, %ebx /* swapgs needed? */


@@ -4,7 +4,6 @@
*
* Copyright 2000-2002 Andi Kleen, SuSE Labs.
*/
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
@@ -17,6 +16,8 @@
#include <linux/linkage.h>
#include <linux/err.h>
#include "calling.h"
.section .entry.text, "ax"
/*
@@ -106,6 +107,8 @@ ENTRY(entry_SYSENTER_compat)
xorl %r15d, %r15d /* nospec r15 */
cld
IBRS_ENTER
/*
* SYSENTER doesn't filter flags, so we need to clear NT and AC
* ourselves. To save a few cycles, we can check whether
@@ -250,6 +253,8 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
*/
TRACE_IRQS_OFF
IBRS_ENTER
movq %rsp, %rdi
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
@@ -259,6 +264,9 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
/* Opportunistic SYSRET */
sysret32_from_system_call:
TRACE_IRQS_ON /* User mode traces as IRQs on. */
IBRS_EXIT
movq RBX(%rsp), %rbx /* pt_regs->rbx */
movq RBP(%rsp), %rbp /* pt_regs->rbp */
movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */
@@ -385,6 +393,8 @@ ENTRY(entry_INT80_compat)
*/
TRACE_IRQS_OFF
IBRS_ENTER
movq %rsp, %rdi
call do_int80_syscall_32
.Lsyscall_32_done:


@@ -1,13 +1,172 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CPU_DEVICE_ID
#define _CPU_DEVICE_ID 1
#ifndef _ASM_X86_CPU_DEVICE_ID
#define _ASM_X86_CPU_DEVICE_ID
/*
* Declare drivers belonging to specific x86 CPUs
* Similar in spirit to pci_device_id and related PCI functions
*
* The wildcard initializers are in mod_devicetable.h because
* file2alias needs them. Sigh.
*/
#include <linux/mod_devicetable.h>
/* Get the INTEL_FAM* model defines */
#include <asm/intel-family.h>
/* And the X86_VENDOR_* ones */
#include <asm/processor.h>
/* Centaur FAM6 models */
#define X86_CENTAUR_FAM6_C7_A 0xa
#define X86_CENTAUR_FAM6_C7_D 0xd
#define X86_CENTAUR_FAM6_NANO 0xf
/**
* X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Base macro for CPU matching
* @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@_vendor
* @_family: The family number or X86_FAMILY_ANY
* @_model: The model number, model constant or X86_MODEL_ANY
* @_feature: A X86_FEATURE bit or X86_FEATURE_ANY
* @_data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* Use only if you need all selectors. Otherwise use one of the shorter
* macros of the X86_MATCH_* family. If there is no matching shorthand
* macro, consider to add one. If you really need to wrap one of the macros
* into another macro at the usage site for good reasons, then please
* start this local macro with X86_MATCH to allow easy grepping.
*/
#define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(_vendor, _family, _model, \
_feature, _data) { \
.vendor = X86_VENDOR_##_vendor, \
.family = _family, \
.model = _model, \
.feature = _feature, \
.driver_data = (unsigned long) _data \
}
/**
* X86_MATCH_VENDOR_FAM_FEATURE - Macro for matching vendor, family and CPU feature
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@vendor
* @family: The family number or X86_FAMILY_ANY
* @feature: A X86_FEATURE bit
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set to wildcards.
*/
#define X86_MATCH_VENDOR_FAM_FEATURE(vendor, family, feature, data) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, \
X86_MODEL_ANY, feature, data)
/**
* X86_MATCH_VENDOR_FEATURE - Macro for matching vendor and CPU feature
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@vendor
* @feature: A X86_FEATURE bit
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set to wildcards.
*/
#define X86_MATCH_VENDOR_FEATURE(vendor, feature, data) \
X86_MATCH_VENDOR_FAM_FEATURE(vendor, X86_FAMILY_ANY, feature, data)
/**
* X86_MATCH_FEATURE - Macro for matching a CPU feature
* @feature: A X86_FEATURE bit
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set to wildcards.
*/
#define X86_MATCH_FEATURE(feature, data) \
X86_MATCH_VENDOR_FEATURE(ANY, feature, data)
/* Transitional to keep the existing code working */
#define X86_FEATURE_MATCH(feature) X86_MATCH_FEATURE(feature, NULL)
/**
* X86_MATCH_VENDOR_FAM_MODEL - Match vendor, family and model
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@vendor
* @family: The family number or X86_FAMILY_ANY
* @model: The model number, model constant or X86_MODEL_ANY
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set to wildcards.
*/
#define X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, data) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, \
X86_FEATURE_ANY, data)
/**
* X86_MATCH_VENDOR_FAM - Match vendor and family
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@vendor
* @family: The family number or X86_FAMILY_ANY
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* All other missing arguments to X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
* set of wildcards.
*/
#define X86_MATCH_VENDOR_FAM(vendor, family, data) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data)
/**
* X86_MATCH_INTEL_FAM6_MODEL - Match vendor INTEL, family 6 and model
* @model: The model name without the INTEL_FAM6_ prefix or ANY
* The model name is expanded to INTEL_FAM6_@model internally
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
*
* The vendor is set to INTEL, the family to 6 and all other missing
* arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are set to wildcards.
*
* See X86_MATCH_VENDOR_FAM_MODEL_FEATURE() for further information.
*/
#define X86_MATCH_INTEL_FAM6_MODEL(model, data) \
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data)
/*
* Match specific microcode revisions.
*
* vendor/family/model/stepping must be all set.
*
* Only checks against the boot CPU. When mixed-stepping configs are
* valid for a CPU model, add a quirk for every valid stepping and
* do the fine-tuning in the quirk handler.
*/
#include <linux/mod_devicetable.h>
struct x86_cpu_desc {
u8 x86_family;
u8 x86_vendor;
u8 x86_model;
u8 x86_stepping;
u32 x86_microcode_rev;
};
#define INTEL_CPU_DESC(model, stepping, revision) { \
.x86_family = 6, \
.x86_vendor = X86_VENDOR_INTEL, \
.x86_model = (model), \
.x86_stepping = (stepping), \
.x86_microcode_rev = (revision), \
}
#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
@@ -37,5 +196,6 @@
}
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
#endif
#endif /* _ASM_X86_CPU_DEVICE_ID */
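
For context on the matching macros reintroduced above, a hypothetical driver fragment (not part of this diff; the table name, init function, and chosen model are purely illustrative) might use them as follows:

/* Hypothetical driver fragment: build an x86_cpu_id table with the macros
 * defined above and probe only on matching CPUs. XEON_PHI_KNL is used only
 * because it appears in this kernel's <asm/intel-family.h>; any model works. */
#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static const struct x86_cpu_id example_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);

static int __init example_init(void)
{
	if (!x86_match_cpu(example_cpu_ids))
		return -ENODEV;		/* CPU not in the table */
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");

The same header also provides INTEL_CPU_DESC() and x86_cpu_has_min_microcode_rev() for the finer-grained microcode-revision checks mentioned in the comment block.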


@@ -202,8 +202,8 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
@@ -283,6 +283,16 @@
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
/* FREE! (11*32+ 6) */
/* FREE! (11*32+ 7) */
/* FREE! (11*32+ 8) */
/* FREE! (11*32+ 9) */
/* FREE! (11*32+10) */
#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
#define X86_FEATURE_MSR_TSX_CTRL (11*32+18) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
@@ -295,6 +305,7 @@
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -395,5 +406,7 @@
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#endif /* _ASM_X86_CPUFEATURES_H */


@@ -16,6 +16,9 @@
* that group keep the CPUID for the variants sorted by model number.
*/
/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
#define INTEL_FAM6_ANY X86_MODEL_ANY
#define INTEL_FAM6_CORE_YONAH 0x0E
#define INTEL_FAM6_CORE2_MEROM 0x0F
@@ -103,4 +106,7 @@
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
/* Family 5 */
#define INTEL_FAM5_QUARK_X1000 0x09 /* Quark X1000 SoC */
#endif /* _ASM_X86_INTEL_FAMILY_H */


@@ -47,6 +47,8 @@
#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
@@ -73,6 +75,7 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO BIT(4) /*
* Not susceptible to Speculative Store Bypass
@@ -120,6 +123,17 @@
* bit available to control VERW
* behavior.
*/
#define ARCH_CAP_RRSBA BIT(19) /*
* Indicates RET may use predictors
* other than the RSB. With eIBRS
* enabled predictions in kernel mode
* are restricted to targets in
* kernel.
*/
#define ARCH_CAP_PBRSB_NO BIT(24) /*
* Not susceptible to Post-Barrier
* Return Stack Buffer Predictions.
*/
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@@ -385,6 +399,11 @@
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_DE_CFG 0xc0011029
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
@@ -436,9 +455,6 @@
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
#define MSR_F10H_DECFG 0xc0011029
#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a


@@ -9,6 +9,7 @@
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/percpu.h>
/*
* Fill the CPU return stack buffer.
@@ -35,6 +36,7 @@
* the optimal version two calls, each with their own speculation
* trap should their return address end up getting used, in a loop.
*/
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
mov $(nr/2), reg; \
771: \
@@ -52,7 +54,29 @@
774: \
dec reg; \
jnz 771b; \
add $(BITS_PER_LONG/8) * nr, sp; \
/* barrier for jnz misprediction */ \
lfence;
#else
/*
* i386 doesn't unconditionally have LFENCE, as such it can't
* do a loop.
*/
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
.rept nr; \
call 772f; \
int3; \
772:; \
.endr; \
add $(BITS_PER_LONG/8) * nr, sp;
#endif
#define ISSUE_UNBALANCED_RET_GUARD(sp) \
call 992f; \
int3; \
992: \
add $(BITS_PER_LONG/8), sp; \
lfence;
#ifdef __ASSEMBLY__
@@ -141,13 +165,11 @@
* monstrosity above, manually.
*/
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
ANNOTATE_NOSPEC_ALTERNATIVE
ALTERNATIVE "jmp .Lskip_rsb_\@", \
__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
\ftr
.Lskip_rsb_\@:
#endif
.endm
#else /* __ASSEMBLY__ */
@@ -228,6 +250,7 @@ enum spectre_v2_mitigation {
SPECTRE_V2_EIBRS,
SPECTRE_V2_EIBRS_RETPOLINE,
SPECTRE_V2_EIBRS_LFENCE,
SPECTRE_V2_IBRS,
};
/* The indirect branch speculation control variants */
@@ -256,19 +279,19 @@ extern char __indirect_thunk_end[];
* retpoline and IBRS mitigations for Spectre v2 need this; only on future
* CPUs with IBRS_ALL *might* it be avoided.
*/
static inline void vmexit_fill_RSB(void)
static __always_inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
unsigned long loops;
asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
ALTERNATIVE("jmp 910f",
__stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
X86_FEATURE_RETPOLINE)
ALTERNATIVE_2("jmp 910f", "", X86_FEATURE_RSB_VMEXIT,
"jmp 911f", X86_FEATURE_RSB_VMEXIT_LITE)
__stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1))
"911:"
__stringify(ISSUE_UNBALANCED_RET_GUARD(%1))
"910:"
: "=r" (loops), ASM_CALL_CONSTRAINT
: : "memory" );
#endif
}
static __always_inline
@@ -291,6 +314,9 @@ static inline void indirect_branch_prediction_barrier(void)
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);
/*
* With retpoline, we must use IBRS to restrict branch prediction
@@ -300,18 +326,16 @@ extern u64 x86_spec_ctrl_base;
*/
#define firmware_restrict_branch_speculation_start() \
do { \
u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
\
preempt_disable(); \
alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
alternative_msr_write(MSR_IA32_SPEC_CTRL, \
spec_ctrl_current() | SPEC_CTRL_IBRS, \
X86_FEATURE_USE_IBRS_FW); \
} while (0)
#define firmware_restrict_branch_speculation_end() \
do { \
u64 val = x86_spec_ctrl_base; \
\
alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
alternative_msr_write(MSR_IA32_SPEC_CTRL, \
spec_ctrl_current(), \
X86_FEATURE_USE_IBRS_FW); \
preempt_enable(); \
} while (0)
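
The firmware_restrict_branch_speculation_start()/_end() pair above now picks up the per-CPU spec_ctrl_current() value instead of x86_spec_ctrl_base. A hedged, kernel-style sketch of the intended call pattern (the wrapped firmware entry point is hypothetical; the in-tree users are the EFI call wrappers):

/* Sketch only: bracket a firmware call so IBRS is forced on for its duration
 * and the cached SPEC_CTRL value is restored afterwards. example_firmware_call()
 * is a hypothetical stand-in for a real firmware entry point. */
#include <asm/nospec-branch.h>

extern unsigned long example_firmware_call(void);	/* hypothetical */

static unsigned long example_protected_call(void)
{
	unsigned long ret;

	firmware_restrict_branch_speculation_start();	/* spec_ctrl_current() | SPEC_CTRL_IBRS */
	ret = example_firmware_call();
	firmware_restrict_branch_speculation_end();	/* back to spec_ctrl_current() */

	return ret;
}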


@@ -761,8 +761,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
#define MSR_AMD64_DE_CFG 0xC0011029
static void init_amd_ln(struct cpuinfo_x86 *c)
{
/*
@@ -857,12 +855,21 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_ZEN);
/*
* Fix erratum 1076: CPB feature bit not being set in CPUID.
* Always set it, except when running under a hypervisor.
*/
if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
/* Fix up CPUID bits, but only if not virtualised. */
if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
/* Erratum 1076: CPB feature bit not being set in CPUID. */
if (!cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
/*
* Zen3 (Fam19 model < 0x10) parts are not susceptible to
* Branch Type Confusion, but predate the allocation of the
* BTC_NO bit.
*/
if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
set_cpu_cap(c, X86_FEATURE_BTC_NO);
}
}
static void init_amd(struct cpuinfo_x86 *c)
@@ -925,16 +932,16 @@ static void init_amd(struct cpuinfo_x86 *c)
* msr_set_bit() uses the safe accessors, too, even if the MSR
* is not present.
*/
msr_set_bit(MSR_F10H_DECFG,
MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
msr_set_bit(MSR_AMD64_DE_CFG,
MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
/*
* Verify that the MSR write was successful (could be running
* under a hypervisor) and only then assume that LFENCE is
* serializing.
*/
ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
ret = rdmsrl_safe(MSR_AMD64_DE_CFG, &val);
if (!ret && (val & MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)) {
/* A serializing LFENCE stops RDTSC speculation */
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
} else {

View File

@@ -37,6 +37,8 @@
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
@@ -46,16 +48,47 @@ static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
static DEFINE_MUTEX(spec_ctrl_mutex);
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
this_cpu_write(x86_spec_ctrl_current, val);
wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
/*
* The vendor and possibly platform specific bits which can be modified in
* x86_spec_ctrl_base.
* Keep track of the SPEC_CTRL MSR value for the current task, which may differ
* from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
*/
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
void update_spec_ctrl_cond(u64 val)
{
if (this_cpu_read(x86_spec_ctrl_current) == val)
return;
this_cpu_write(x86_spec_ctrl_current, val);
/*
* When KERNEL_IBRS is enabled this MSR is written on return-to-user;
* unless forced, the update can be delayed until that time.
*/
if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
u64 spec_ctrl_current(void)
{
return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);
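
update_spec_ctrl() always writes the MSR, while update_spec_ctrl_cond() may skip the hardware write because, with KERNEL_IBRS, the MSR is rewritten on the next return to user space anyway. A self-contained model of that decision, with placeholder names rather than kernel interfaces:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t cached_spec_ctrl;   /* models this CPU's x86_spec_ctrl_current */
static bool kernel_ibrs_enabled;    /* models X86_FEATURE_KERNEL_IBRS */

static void msr_write(uint64_t val)
{
	printf("wrmsr SPEC_CTRL = 0x%llx\n", (unsigned long long)val);
}

/* Unconditional variant: cache and write. */
static void update_spec_ctrl_model(uint64_t val)
{
	cached_spec_ctrl = val;
	msr_write(val);
}

/*
 * Conditional variant: skip redundant writes, and defer the write when
 * KERNEL_IBRS rewrites the MSR on the return-to-user path anyway.
 */
static void update_spec_ctrl_cond_model(uint64_t val)
{
	if (cached_spec_ctrl == val)
		return;
	cached_spec_ctrl = val;
	if (!kernel_ibrs_enabled)
		msr_write(val);
}

int main(void)
{
	update_spec_ctrl_model(0x1);       /* boot-time base value: always written */
	kernel_ibrs_enabled = true;
	update_spec_ctrl_cond_model(0x3);  /* cached only; hardware write deferred */
	update_spec_ctrl_cond_model(0x3);  /* no change, nothing to do */
	return 0;
}
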
/*
* AMD specific MSR info for Speculative Store Bypass control.
@@ -105,13 +138,21 @@ void __init check_bugs(void)
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
/* Allow STIBP in MSR_SPEC_CTRL if supported */
if (boot_cpu_has(X86_FEATURE_STIBP))
x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
/*
* retbleed_select_mitigation() relies on the state set by
* spectre_v2_select_mitigation(); specifically it wants to know about
* spectre_v2=ibrs.
*/
retbleed_select_mitigation();
/*
* spectre_v2_user_select_mitigation() relies on the state set by
* retbleed_select_mitigation(); specifically the STIBP selection is
* forced for UNRET.
*/
spectre_v2_user_select_mitigation();
ssb_select_mitigation();
l1tf_select_mitigation();
md_clear_select_mitigation();
@@ -151,31 +192,17 @@ void __init check_bugs(void)
#endif
}
/*
* NOTE: For VMX, this function is not called in the vmexit path.
* It uses vmx_spec_ctrl_restore_host() instead.
*/
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
u64 msrval, guestval, hostval = x86_spec_ctrl_base;
u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
struct thread_info *ti = current_thread_info();
/* Is MSR_SPEC_CTRL implemented ? */
if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
/*
* Restrict guest_spec_ctrl to supported values. Clear the
* modifiable bits in the host base value and or the
* modifiable bits from the guest value.
*/
guestval = hostval & ~x86_spec_ctrl_mask;
guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
/* SSBD controlled in MSR_SPEC_CTRL */
if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD))
hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
/* Conditional STIBP enabled? */
if (static_branch_unlikely(&switch_to_cond_stibp))
hostval |= stibp_tif_to_spec_ctrl(ti->flags);
if (hostval != guestval) {
msrval = setguest ? guestval : hostval;
wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -705,12 +732,101 @@ static int __init nospectre_v1_cmdline(char *str)
}
early_param("nospectre_v1", nospectre_v1_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;
#undef pr_fmt
#define pr_fmt(fmt) "RETBleed: " fmt
enum retbleed_mitigation {
RETBLEED_MITIGATION_NONE,
RETBLEED_MITIGATION_IBRS,
RETBLEED_MITIGATION_EIBRS,
};
enum retbleed_mitigation_cmd {
RETBLEED_CMD_OFF,
RETBLEED_CMD_AUTO
};
const char * const retbleed_strings[] = {
[RETBLEED_MITIGATION_NONE] = "Vulnerable",
[RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
[RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
};
static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
RETBLEED_CMD_AUTO;
static int __init retbleed_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "off"))
retbleed_cmd = RETBLEED_CMD_OFF;
else if (!strcmp(str, "auto"))
retbleed_cmd = RETBLEED_CMD_AUTO;
else
pr_err("Unknown retbleed option (%s). Defaulting to 'auto'\n", str);
return 0;
}
early_param("retbleed", retbleed_parse_cmdline);
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
static void __init retbleed_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
return;
switch (retbleed_cmd) {
case RETBLEED_CMD_OFF:
return;
case RETBLEED_CMD_AUTO:
default:
/*
* The Intel mitigation (IBRS) was already selected in
* spectre_v2_select_mitigation().
*/
break;
}
switch (retbleed_mitigation) {
default:
break;
}
/*
* Let IBRS trump all on Intel without affecting the effects of the
* retbleed= cmdline option.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
switch (spectre_v2_enabled) {
case SPECTRE_V2_IBRS:
retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
break;
case SPECTRE_V2_EIBRS:
case SPECTRE_V2_EIBRS_RETPOLINE:
case SPECTRE_V2_EIBRS_LFENCE:
retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
break;
default:
pr_err(RETBLEED_INTEL_MSG);
}
}
pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
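
Note that on this 4.14 backport retbleed_select_mitigation() selects no mitigation of its own; it only reports how the already-chosen Spectre v2 mode covers RETBleed on Intel parts. A hedged standalone model of that precedence:

#include <stdio.h>

enum v2_mode { V2_NONE, V2_RETPOLINE, V2_IBRS, V2_EIBRS };
enum rb_mode { RB_NONE, RB_IBRS, RB_EIBRS };
enum rb_cmd  { RB_CMD_OFF, RB_CMD_AUTO };

/* Map the Spectre v2 selection onto the reported RETBleed mitigation. */
static enum rb_mode retbleed_select(enum rb_cmd cmd, enum v2_mode v2, int intel)
{
	if (cmd == RB_CMD_OFF)
		return RB_NONE;
	if (!intel)
		return RB_NONE;            /* nothing is selected for non-Intel here */

	switch (v2) {
	case V2_IBRS:  return RB_IBRS;
	case V2_EIBRS: return RB_EIBRS;
	default:       return RB_NONE;     /* would print the "vulnerable" warning */
	}
}

int main(void)
{
	printf("%d\n", retbleed_select(RB_CMD_AUTO, V2_IBRS, 1));      /* 1: IBRS */
	printf("%d\n", retbleed_select(RB_CMD_AUTO, V2_RETPOLINE, 1)); /* 0: vulnerable */
	return 0;
}
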
#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
@@ -740,6 +856,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
@@ -781,6 +898,7 @@ enum spectre_v2_mitigation_cmd {
SPECTRE_V2_CMD_EIBRS,
SPECTRE_V2_CMD_EIBRS_RETPOLINE,
SPECTRE_V2_CMD_EIBRS_LFENCE,
SPECTRE_V2_CMD_IBRS,
};
enum spectre_v2_user_cmd {
@@ -821,13 +939,15 @@ static void __init spec_v2_user_print_cond(const char *reason, bool secure)
pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
spectre_v2_parse_user_cmdline(void)
{
char arg[20];
int ret, i;
switch (v2_cmd) {
switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE:
return SPECTRE_V2_USER_CMD_NONE;
case SPECTRE_V2_CMD_FORCE:
@@ -853,15 +973,16 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
return SPECTRE_V2_USER_CMD_AUTO;
}
static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
return (mode == SPECTRE_V2_EIBRS ||
mode == SPECTRE_V2_EIBRS_RETPOLINE ||
mode == SPECTRE_V2_EIBRS_LFENCE);
return mode == SPECTRE_V2_IBRS ||
mode == SPECTRE_V2_EIBRS ||
mode == SPECTRE_V2_EIBRS_RETPOLINE ||
mode == SPECTRE_V2_EIBRS_LFENCE;
}
static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
spectre_v2_user_select_mitigation(void)
{
enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
bool smt_possible = IS_ENABLED(CONFIG_SMP);
@@ -874,7 +995,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
smt_possible = false;
cmd = spectre_v2_parse_user_cmdline(v2_cmd);
cmd = spectre_v2_parse_user_cmdline();
switch (cmd) {
case SPECTRE_V2_USER_CMD_NONE:
goto set_mode;
@@ -922,12 +1043,12 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
}
/*
* If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
* required.
* If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
* STIBP is not required.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
!smt_possible ||
spectre_v2_in_eibrs_mode(spectre_v2_enabled))
spectre_v2_in_ibrs_mode(spectre_v2_enabled))
return;
/*
@@ -952,6 +1073,7 @@ static const char * const spectre_v2_strings[] = {
[SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
[SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
[SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
[SPECTRE_V2_IBRS] = "Mitigation: IBRS",
};
static const struct {
@@ -969,6 +1091,7 @@ static const struct {
{ "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
{ "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
{ "auto", SPECTRE_V2_CMD_AUTO, false },
{ "ibrs", SPECTRE_V2_CMD_IBRS, false },
};
static void __init spec_v2_print_cond(const char *reason, bool secure)
@@ -1031,6 +1154,24 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
spec_v2_print_cond(mitigation_options[i].option,
mitigation_options[i].secure);
return cmd;
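
spectre_v2=ibrs is honoured only on Intel CPUs that advertise IBRS and are not running as a Xen PV guest; anything else falls back to AUTO. A compact model of that validation, with hypothetical flag names:

#include <stdio.h>

enum v2_cmd { CMD_AUTO, CMD_IBRS };

struct cpu_caps {
	int vendor_intel;
	int has_ibrs;
	int xen_pv;
};

static enum v2_cmd validate_ibrs_cmd(enum v2_cmd cmd, const struct cpu_caps *c)
{
	if (cmd != CMD_IBRS)
		return cmd;
	if (!c->vendor_intel || !c->has_ibrs || c->xen_pv) {
		fprintf(stderr, "ibrs selected but unsupported, switching to AUTO\n");
		return CMD_AUTO;
	}
	return CMD_IBRS;
}

int main(void)
{
	struct cpu_caps xen_guest = { .vendor_intel = 1, .has_ibrs = 1, .xen_pv = 1 };
	struct cpu_caps bare      = { .vendor_intel = 1, .has_ibrs = 1 };

	printf("%d\n", validate_ibrs_cmd(CMD_IBRS, &xen_guest)); /* 0: AUTO */
	printf("%d\n", validate_ibrs_cmd(CMD_IBRS, &bare));      /* 1: IBRS */
	return 0;
}
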
@@ -1046,6 +1187,70 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
return SPECTRE_V2_RETPOLINE;
}
/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
u64 ia32_cap;
if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
return;
ia32_cap = x86_read_arch_cap_msr();
if (ia32_cap & ARCH_CAP_RRSBA) {
x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
update_spec_ctrl(x86_spec_ctrl_base);
}
}
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
{
/*
* Similar to context switches, there are two types of RSB attacks
* after VM exit:
*
* 1) RSB underflow
*
* 2) Poisoned RSB entry
*
* When retpoline is enabled, both are mitigated by filling/clearing
* the RSB.
*
* When IBRS is enabled, while #1 would be mitigated by the IBRS branch
* prediction isolation protections, RSB still needs to be cleared
* because of #2. Note that SMEP provides no protection here, unlike
* user-space-poisoned RSB entries.
*
* eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
* bug is present then a LITE version of RSB protection is required:
* just a single call needs to retire before a RET is executed.
*/
switch (mode) {
case SPECTRE_V2_NONE:
return;
case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB) &&
(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) {
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
}
return;
case SPECTRE_V2_EIBRS_RETPOLINE:
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
return;
}
pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
dump_stack();
}
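
The helper above picks one of three behaviours at VM exit: nothing for SPECTRE_V2_NONE, the lightweight single-CALL sequence for eIBRS parts with the PBRSB erratum, and a full RSB stuff for everything else. A compact standalone model of the same decision (the Intel vendor check is folded into the eibrs_pbrsb_bug flag here):

#include <stdio.h>

enum mode { M_NONE, M_EIBRS, M_EIBRS_LFENCE, M_EIBRS_RETPOLINE,
	    M_RETPOLINE, M_LFENCE, M_IBRS };
enum vmexit_rsb { RSB_NOTHING, RSB_LITE, RSB_FULL };

static enum vmexit_rsb rsb_fill_at_vmexit(enum mode m, int eibrs_pbrsb_bug)
{
	switch (m) {
	case M_NONE:
		return RSB_NOTHING;
	case M_EIBRS:
	case M_EIBRS_LFENCE:
		/* eIBRS protects the RSB itself; only PBRSB needs the LITE path. */
		return eibrs_pbrsb_bug ? RSB_LITE : RSB_NOTHING;
	default:
		/* retpoline, plain IBRS, eIBRS+retpoline: stuff the whole RSB. */
		return RSB_FULL;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       rsb_fill_at_vmexit(M_EIBRS, 1),  /* 1: LITE */
	       rsb_fill_at_vmexit(M_IBRS, 0),   /* 2: FULL */
	       rsb_fill_at_vmexit(M_NONE, 0));  /* 0: nothing */
	return 0;
}
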
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -1070,6 +1275,14 @@ static void __init spectre_v2_select_mitigation(void)
break;
}
if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
retbleed_cmd != RETBLEED_CMD_OFF &&
boot_cpu_has(X86_FEATURE_IBRS) &&
boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
mode = SPECTRE_V2_IBRS;
break;
}
mode = spectre_v2_select_retpoline();
break;
@@ -1086,6 +1299,10 @@ static void __init spectre_v2_select_mitigation(void)
mode = spectre_v2_select_retpoline();
break;
case SPECTRE_V2_CMD_IBRS:
mode = SPECTRE_V2_IBRS;
break;
case SPECTRE_V2_CMD_EIBRS:
mode = SPECTRE_V2_EIBRS;
break;
@@ -1102,10 +1319,9 @@ static void __init spectre_v2_select_mitigation(void)
if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
if (spectre_v2_in_eibrs_mode(mode)) {
/* Force it so VMEXIT will restore correctly */
if (spectre_v2_in_ibrs_mode(mode)) {
x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
update_spec_ctrl(x86_spec_ctrl_base);
}
switch (mode) {
@@ -1113,6 +1329,12 @@ static void __init spectre_v2_select_mitigation(void)
case SPECTRE_V2_EIBRS:
break;
case SPECTRE_V2_IBRS:
setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
break;
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_EIBRS_LFENCE:
setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
@@ -1124,43 +1346,86 @@ static void __init spectre_v2_select_mitigation(void)
break;
}
/*
* Disable alternate RSB predictions in kernel when indirect CALLs and
* JMPs gets protection against BHI and Intramode-BTI, but RET
* prediction from a non-RSB predictor is still a risk.
*/
if (mode == SPECTRE_V2_EIBRS_LFENCE ||
mode == SPECTRE_V2_EIBRS_RETPOLINE ||
mode == SPECTRE_V2_RETPOLINE)
spec_ctrl_disable_kernel_rrsba();
spectre_v2_enabled = mode;
pr_info("%s\n", spectre_v2_strings[mode]);
/*
* If spectre v2 protection has been enabled, unconditionally fill
* RSB during a context switch; this protects against two independent
* issues:
* If Spectre v2 protection has been enabled, fill the RSB during a
* context switch. In general there are two types of RSB attacks
* across context switches, for which the CALLs/RETs may be unbalanced.
*
* - RSB underflow (and switch to BTB) on Skylake+
* - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
* 1) RSB underflow
*
* Some Intel parts have "bottomless RSB". When the RSB is empty,
* speculated return targets may come from the branch predictor,
* which could have a user-poisoned BTB or BHB entry.
*
* AMD has it even worse: *all* returns are speculated from the BTB,
* regardless of the state of the RSB.
*
* When IBRS or eIBRS is enabled, the "user -> kernel" attack
* scenario is mitigated by the IBRS branch prediction isolation
* properties, so the RSB buffer filling wouldn't be necessary to
* protect against this type of attack.
*
* The "user -> user" attack scenario is mitigated by RSB filling.
*
* 2) Poisoned RSB entry
*
* If the 'next' in-kernel return stack is shorter than 'prev',
* 'next' could be tricked into speculating with a user-poisoned RSB
* entry.
*
* The "user -> kernel" attack scenario is mitigated by SMEP and
* eIBRS.
*
* The "user -> user" scenario, also known as SpectreBHB, requires
* RSB clearing.
*
* So to mitigate all cases, unconditionally fill RSB on context
* switches.
*
* FIXME: Is this pointless for retbleed-affected AMD?
*/
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
/*
* Retpoline means the kernel is safe because it has no indirect
* branches. Enhanced IBRS protects firmware too, so, enable restricted
* speculation around firmware calls only when Enhanced IBRS isn't
* supported.
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
* and Enhanced IBRS protect firmware too, so enable IBRS around
* firmware calls only when IBRS / Enhanced IBRS aren't otherwise
* enabled.
*
* Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
* the user might select retpoline on the kernel command line and if
* the CPU supports Enhanced IBRS, the kernel might unintentionally not
* enable IBRS around firmware calls.
*/
if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
/* Set up IBPB and STIBP depending on the general spectre V2 command */
spectre_v2_user_select_mitigation(cmd);
spectre_v2_cmd = cmd;
}
static void update_stibp_msr(void * __unused)
{
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
update_spec_ctrl(val);
}
/* Update x86_spec_ctrl_base in case SMT state changed. */
@@ -1376,16 +1641,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
break;
}
/*
* If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
* bit in the mask to allow guests to use the mitigation even in the
* case where the host does not enable it.
*/
if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
static_cpu_has(X86_FEATURE_AMD_SSBD)) {
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
}
/*
* We have three CPU feature flags that are in play here:
* - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
@@ -1403,7 +1658,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
x86_amd_ssb_disable();
} else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
update_spec_ctrl(x86_spec_ctrl_base);
}
}
@@ -1608,7 +1863,7 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
void x86_spec_ctrl_setup_ap(void)
{
if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
update_spec_ctrl(x86_spec_ctrl_base);
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
x86_amd_ssb_disable();
@@ -1843,7 +2098,7 @@ static ssize_t mmio_stale_data_show_state(char *buf)
static char *stibp_state(void)
{
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
return "";
switch (spectre_v2_user_stibp) {
@@ -1873,6 +2128,19 @@ static char *ibpb_state(void)
return "";
}
static char *pbrsb_eibrs_state(void)
{
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
return ", PBRSB-eIBRS: SW sequence";
else
return ", PBRSB-eIBRS: Vulnerable";
} else {
return ", PBRSB-eIBRS: Not affected";
}
}
static ssize_t spectre_v2_show_state(char *buf)
{
if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
@@ -1885,12 +2153,13 @@ static ssize_t spectre_v2_show_state(char *buf)
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
return sprintf(buf, "%s%s%s%s%s%s\n",
return sprintf(buf, "%s%s%s%s%s%s%s\n",
spectre_v2_strings[spectre_v2_enabled],
ibpb_state(),
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
stibp_state(),
boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
pbrsb_eibrs_state(),
spectre_v2_module_string());
}
@@ -1899,6 +2168,11 @@ static ssize_t srbds_show_state(char *buf)
return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}
static ssize_t retbleed_show_state(char *buf)
{
return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -1942,6 +2216,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_MMIO_UNKNOWN:
return mmio_stale_data_show_state(buf);
case X86_BUG_RETBLEED:
return retbleed_show_state(buf);
default:
break;
}
@@ -2001,4 +2278,9 @@ ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *at
else
return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}
ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}
#endif

View File

@@ -906,6 +906,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_SWAPGS BIT(6)
#define NO_ITLB_MULTIHIT BIT(7)
#define NO_MMIO BIT(8)
#define NO_EIBRS_PBRSB BIT(9)
#define VULNWL(_vendor, _family, _model, _whitelist) \
{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -947,7 +948,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
/*
* Technically, swapgs isn't serializing on AMD (despite it previously
@@ -957,7 +958,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
* good enough for our purposes.
*/
VULNWL_INTEL(ATOM_TREMONT_X, NO_ITLB_MULTIHIT),
VULNWL_INTEL(ATOM_TREMONT, NO_EIBRS_PBRSB),
VULNWL_INTEL(ATOM_TREMONT_L, NO_EIBRS_PBRSB),
VULNWL_INTEL(ATOM_TREMONT_X, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
/* AMD Family 0xf - 0x12 */
VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
@@ -970,48 +973,55 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
{}
};
#define VULNBL(vendor, family, model, blacklist) \
X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \
X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \
INTEL_FAM6_##model, steppings, \
X86_FEATURE_ANY, issues)
#define VULNBL_AMD(family, blacklist) \
VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
#define SRBDS BIT(0)
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO BIT(1)
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS BIT(2)
/* CPU is affected by RETbleed, speculating where you would not expect it */
#define RETBLEED BIT(3)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_CORE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_ULT, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_GT3E, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_XEON_D,X86_STEPPINGS(0x3, 0x5), MMIO),
VULNBL_INTEL_STEPPINGS(HASWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_XEON_D,X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL_CORE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) |
BIT(7) | BIT(0xB), MMIO),
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0x8), SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO),
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0x8), SRBDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPINGS(0x1, 0x1), MMIO),
VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO),
VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(CANNONLAKE_MOBILE,X86_STEPPING_ANY, RETBLEED),
VULNBL_INTEL_STEPPINGS(ICELAKE_MOBILE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(ICELAKE_XEON_D, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO | MMIO_SBDS),
VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
VULNBL_AMD(0x15, RETBLEED),
VULNBL_AMD(0x16, RETBLEED),
VULNBL_AMD(0x17, RETBLEED),
{}
};
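
The rewritten blacklist relies on X86_STEPPING_ANY plus per-entry flag bits instead of hand-maintained stepping masks; a lookup is essentially "first matching family/model/stepping entry, then test the requested flag". A simplified standalone model (the two example model numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SRBDS     (1u << 0)
#define MMIO      (1u << 1)
#define MMIO_SBDS (1u << 2)
#define RETBLEED  (1u << 3)

struct vuln_entry {
	int family, model;
	uint16_t stepping_mask;   /* bit n set => stepping n matches */
	uint32_t flags;
};

#define STEPPING_ANY 0xffffu

static const struct vuln_entry blacklist[] = {
	{ 6, 0x8e, STEPPING_ANY, SRBDS | MMIO | RETBLEED },  /* e.g. Kaby Lake mobile */
	{ 6, 0x55, STEPPING_ANY, MMIO | RETBLEED },          /* e.g. Skylake-X */
	{ 0 }
};

static int cpu_matches(int family, int model, int stepping, uint32_t which)
{
	const struct vuln_entry *e;

	for (e = blacklist; e->family; e++)
		if (e->family == family && e->model == model &&
		    (e->stepping_mask & (1u << stepping)))
			return !!(e->flags & which);
	return 0;
}

int main(void)
{
	printf("retbleed affected: %d\n", cpu_matches(6, 0x8e, 0xa, RETBLEED));
	return 0;
}
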
@@ -1117,6 +1127,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
}
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
!(ia32_cap & ARCH_CAP_PBRSB_NO))
setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;

View File

@@ -16,12 +16,17 @@
* respective wildcard entries.
*
* A typical table entry would be to match a specific CPU
* { X86_VENDOR_INTEL, 6, 0x12 }
* or to match a specific CPU feature
* { X86_FEATURE_MATCH(X86_FEATURE_FOOBAR) }
*
* X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_BROADWELL,
* X86_FEATURE_ANY, NULL);
*
* Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY,
* %X86_MODEL_ANY, %X86_FEATURE_ANY or 0 (except for vendor)
* %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor)
*
* asm/cpu_device_id.h contains a set of useful macros which are shortcuts
* for various common selections. The above can be shortened to:
*
* X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL);
*
* Arrays used to match for this should also be declared using
* MODULE_DEVICE_TABLE(x86cpu, ...)
@@ -53,3 +58,34 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
return NULL;
}
EXPORT_SYMBOL(x86_match_cpu);
static const struct x86_cpu_desc *
x86_match_cpu_with_stepping(const struct x86_cpu_desc *match)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
const struct x86_cpu_desc *m;
for (m = match; m->x86_family | m->x86_model; m++) {
if (c->x86_vendor != m->x86_vendor)
continue;
if (c->x86 != m->x86_family)
continue;
if (c->x86_model != m->x86_model)
continue;
if (c->x86_stepping != m->x86_stepping)
continue;
return m;
}
return NULL;
}
bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table)
{
const struct x86_cpu_desc *res = x86_match_cpu_with_stepping(table);
if (!res || res->x86_microcode_rev > boot_cpu_data.microcode)
return false;
return true;
}
EXPORT_SYMBOL_GPL(x86_cpu_has_min_microcode_rev);
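
A typical caller of the new helper supplies a table of family/model/stepping entries with a minimum microcode revision and treats a missing entry as "too old". The sketch below mirrors that comparison with invented struct and table names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpu_desc_model {
	int family, model, stepping;
	uint32_t min_microcode_rev;
};

/* Hypothetical table: require at least revision 0xd6 on one specific stepping. */
static const struct cpu_desc_model isolation_ucode[] = {
	{ 6, 0x8e, 0xa, 0xd6 },
	{ 0 }
};

static bool has_min_microcode_rev(const struct cpu_desc_model *table,
				  int family, int model, int stepping,
				  uint32_t loaded_rev)
{
	const struct cpu_desc_model *m;

	for (m = table; m->family; m++)
		if (m->family == family && m->model == model &&
		    m->stepping == stepping)
			return loaded_rev >= m->min_microcode_rev;
	/* No entry for this CPU: the real helper also treats that as "too old". */
	return false;
}

int main(void)
{
	printf("%d\n", has_min_microcode_rev(isolation_ucode, 6, 0x8e, 0xa, 0xca)); /* 0 */
	printf("%d\n", has_min_microcode_rev(isolation_ucode, 6, 0x8e, 0xa, 0xd6)); /* 1 */
	return 0;
}
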

View File

@@ -222,7 +222,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
return ret;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
if (rev >= mc->hdr.patch_id)
/*
* Allow application of the same revision to pick up SMT-specific
* changes even if the revision of the other SMT thread is already
* up-to-date.
*/
if (rev > mc->hdr.patch_id)
return ret;
if (!__apply_microcode_amd(mc)) {
@@ -304,8 +310,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
/* Check whether we have saved a new patch already: */
if (*new_rev && rev < mc->hdr.patch_id) {
/*
* Check whether a new patch has been saved already. Also, allow application of
* the same revision in order to pick up SMT-thread-specific configuration even
* if the sibling SMT thread already has an up-to-date revision.
*/
if (*new_rev && rev <= mc->hdr.patch_id) {
if (!__apply_microcode_amd(mc)) {
*new_rev = mc->hdr.patch_id;
return;

View File

@@ -21,6 +21,7 @@ struct cpuid_bit {
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
{ X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
{ X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
{ X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },

View File

@@ -55,24 +55,6 @@ void tsx_enable(void)
wrmsrl(MSR_IA32_TSX_CTRL, tsx);
}
static bool __init tsx_ctrl_is_supported(void)
{
u64 ia32_cap = x86_read_arch_cap_msr();
/*
* TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
* MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
*
* TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
* microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
* bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
* MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
* tsx= cmdline requests will do nothing on CPUs without
* MSR_IA32_TSX_CTRL support.
*/
return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
}
static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
{
if (boot_cpu_has_bug(X86_BUG_TAA))
@@ -86,9 +68,22 @@ void __init tsx_init(void)
char arg[5] = {};
int ret;
if (!tsx_ctrl_is_supported())
/*
* TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
* MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
*
* TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
* microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
* bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
* MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
* tsx= cmdline requests will do nothing on CPUs without
* MSR_IA32_TSX_CTRL support.
*/
if (!(x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR))
return;
setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
if (ret >= 0) {
if (!strcmp(arg, "on")) {

View File

@@ -435,7 +435,7 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
}
if (updmsr)
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
update_spec_ctrl_cond(msr);
}
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)

View File

@@ -579,7 +579,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
/* Otherwise, skip ahead to the user-specified starting frame: */
while (!unwind_done(state) &&
(!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
state->sp < (unsigned long)first_frame))
state->sp <= (unsigned long)first_frame))
unwind_next_frame(state);
return;

View File

@@ -663,6 +663,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
entry->edx = 0;
/*
* IBRS, IBPB and VIRT_SSBD aren't necessarily present in

View File

@@ -758,8 +758,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
ctxt->mode, linear);
}
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
enum x86emul_mode mode)
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
{
ulong linear;
int rc;
@@ -769,41 +768,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
if (ctxt->op_bytes != sizeof(unsigned long))
addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->_eip = addr.ea;
return rc;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
{
return assign_eip(ctxt, dst, ctxt->mode);
u64 efer;
struct desc_struct cs;
u16 selector;
u32 base3;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
/* Real mode. cpu must not have long mode active */
if (efer & EFER_LMA)
return X86EMUL_UNHANDLEABLE;
ctxt->mode = X86EMUL_MODE_REAL;
return X86EMUL_CONTINUE;
}
if (ctxt->eflags & X86_EFLAGS_VM) {
/* Protected/VM86 mode. cpu must not have long mode active */
if (efer & EFER_LMA)
return X86EMUL_UNHANDLEABLE;
ctxt->mode = X86EMUL_MODE_VM86;
return X86EMUL_CONTINUE;
}
if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
return X86EMUL_UNHANDLEABLE;
if (efer & EFER_LMA) {
if (cs.l) {
/* Proper long mode */
ctxt->mode = X86EMUL_MODE_PROT64;
} else if (cs.d) {
/* 32 bit compatibility mode */
ctxt->mode = X86EMUL_MODE_PROT32;
} else {
ctxt->mode = X86EMUL_MODE_PROT16;
}
} else {
/* Legacy 32 bit / 16 bit mode */
ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
}
return X86EMUL_CONTINUE;
}
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
const struct desc_struct *cs_desc)
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
enum x86emul_mode mode = ctxt->mode;
int rc;
return assign_eip(ctxt, dst);
}
#ifdef CONFIG_X86_64
if (ctxt->mode >= X86EMUL_MODE_PROT16) {
if (cs_desc->l) {
u64 efer = 0;
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
{
int rc = emulator_recalc_and_set_mode(ctxt);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
mode = X86EMUL_MODE_PROT64;
} else
mode = X86EMUL_MODE_PROT32; /* temporary value */
}
#endif
if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
rc = assign_eip(ctxt, dst, mode);
if (rc == X86EMUL_CONTINUE)
ctxt->mode = mode;
return rc;
if (rc != X86EMUL_CONTINUE)
return rc;
return assign_eip(ctxt, dst);
}
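
emulator_recalc_and_set_mode() rederives the emulation mode from CR0.PE, EFLAGS.VM, EFER.LMA and the CS descriptor's L/D bits instead of trusting the mode captured before a far transfer. A self-contained model of that decision tree (illustrative only, not the emulator's API):

#include <stdbool.h>
#include <stdio.h>

enum emul_mode { MODE_REAL, MODE_VM86, MODE_PROT16, MODE_PROT32, MODE_PROT64, MODE_INVALID };

static enum emul_mode recalc_mode(bool cr0_pe, bool eflags_vm, bool efer_lma,
				  bool cs_l, bool cs_d)
{
	if (!cr0_pe)
		return efer_lma ? MODE_INVALID : MODE_REAL;   /* real mode excludes LMA */
	if (eflags_vm)
		return efer_lma ? MODE_INVALID : MODE_VM86;   /* so does VM86 */
	if (efer_lma) {
		if (cs_l)
			return MODE_PROT64;                   /* proper long mode */
		return cs_d ? MODE_PROT32 : MODE_PROT16;      /* compatibility mode */
	}
	return cs_d ? MODE_PROT32 : MODE_PROT16;              /* legacy protected mode */
}

int main(void)
{
	printf("%d\n", recalc_mode(true, false, true, true, false));    /* 4: PROT64 */
	printf("%d\n", recalc_mode(false, false, false, false, false)); /* 0: REAL */
	return 0;
}
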
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -2205,7 +2234,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
rc = assign_eip_far(ctxt, ctxt->src.val);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
@@ -2286,7 +2315,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
rc = assign_eip_far(ctxt, eip);
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
@@ -2889,6 +2918,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = rdx;
ctxt->mode = usermode;
*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
return X86EMUL_CONTINUE;
@@ -3474,7 +3504,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
rc = assign_eip_far(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
goto fail;
@@ -3621,11 +3651,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
int cr_num = ctxt->modrm_reg;
int r;
if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
if (cr_num == 0) {
/*
* CR0 write might have updated CR0.PE and/or CR0.PG
* which can affect the cpu's execution mode.
*/
r = emulator_recalc_and_set_mode(ctxt);
if (r != X86EMUL_CONTINUE)
return r;
}
return X86EMUL_CONTINUE;
}

View File

@@ -47,6 +47,7 @@
#include <asm/irq_remapping.h>
#include <asm/microcode.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/virtext.h>
#include "trace.h"
@@ -3644,9 +3645,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
msr->data = 0;
switch (msr->index) {
case MSR_F10H_DECFG:
if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
case MSR_AMD64_DE_CFG:
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
break;
default:
return 1;
@@ -3755,7 +3756,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = 0x1E;
}
break;
case MSR_F10H_DECFG:
case MSR_AMD64_DE_CFG:
msr_info->data = svm->msr_decfg;
break;
default:
@@ -3947,7 +3948,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
case MSR_F10H_DECFG: {
case MSR_AMD64_DE_CFG: {
struct kvm_msr_entry msr_entry;
msr_entry.index = msr->index;

View File

@@ -40,6 +40,7 @@
#include "x86.h"
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
@@ -9769,10 +9770,36 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
}
u64 __always_inline vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx)
{
u64 guestval, hostval = this_cpu_read(x86_spec_ctrl_current);
if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
return 0;
guestval = __rdmsr(MSR_IA32_SPEC_CTRL);
/*
* If the guest/host SPEC_CTRL values differ, restore the host value.
*
* For legacy IBRS, the IBRS bit always needs to be written after
* transitioning from a less privileged predictor mode, regardless of
* whether the guest/host values differ.
*/
if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
guestval != hostval)
native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
barrier_nospec();
return guestval;
}
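
The new helper restores the host SPEC_CTRL value only when it differs from the guest's, except under legacy kernel IBRS, where the write itself is what re-establishes the more privileged predictor mode and therefore must always happen. A hedged model of that rule:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns true when a wrmsr(SPEC_CTRL, hostval) is needed on the VM-exit path. */
static bool must_restore_spec_ctrl(bool kernel_ibrs, uint64_t guestval, uint64_t hostval)
{
	/*
	 * With legacy IBRS the bit has to be (re)written after every transition
	 * from a less privileged predictor mode, even if the value is unchanged.
	 */
	return kernel_ibrs || guestval != hostval;
}

int main(void)
{
	printf("%d\n", must_restore_spec_ctrl(false, 0x1, 0x1)); /* 0: skip the write */
	printf("%d\n", must_restore_spec_ctrl(false, 0x0, 0x1)); /* 1: values differ */
	printf("%d\n", must_restore_spec_ctrl(true,  0x1, 0x1)); /* 1: legacy IBRS */
	return 0;
}
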
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long debugctlmsr, cr3, cr4;
u64 spec_ctrl;
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!cpu_has_virtual_nmis() &&
@@ -9966,6 +9993,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
, "eax", "ebx", "edi", "esi"
#endif
);
/*
* IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
* the first unbalanced RET after vmexit!
*
* For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
* entries and (in some cases) RSB underflow.
*
* eIBRS has its own protection against poisoned RSB, so it doesn't
* need the RSB filling sequence. But it does need to be enabled, and a
* single call to retire, before the first unbalanced RET.
*
* So no RETs before vmx_spec_ctrl_restore_host() below.
*/
vmexit_fill_RSB();
/* Save this for below */
spec_ctrl = vmx_spec_ctrl_restore_host(vmx);
vmx_enable_fb_clear(vmx);
@@ -9985,12 +10029,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* save it.
*/
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
vmx->spec_ctrl = spec_ctrl;
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
if (debugctlmsr)

View File

@@ -1063,7 +1063,7 @@ static unsigned num_emulated_msrs;
* can be used by a hypervisor to validate requested CPU features.
*/
static u32 msr_based_features[] = {
MSR_F10H_DECFG,
MSR_AMD64_DE_CFG,
MSR_IA32_UCODE_REV,
MSR_IA32_ARCH_CAPABILITIES,
};

View File

@@ -124,9 +124,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PHYSICAL_PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
/*
* Mask out any bits not part of the actual physical
* address, like memory encryption bits.
*/
phys_addr &= PHYSICAL_PAGE_MASK;
retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
pcm, &new_pcm);
if (retval) {

View File

@@ -527,15 +527,23 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
static void pm_save_spec_msr(void)
{
u32 spec_msr_id[] = {
MSR_IA32_SPEC_CTRL,
MSR_IA32_TSX_CTRL,
MSR_TSX_FORCE_ABORT,
MSR_IA32_MCU_OPT_CTRL,
MSR_AMD64_LS_CFG,
struct msr_enumeration {
u32 msr_no;
u32 feature;
} msr_enum[] = {
{ MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
{ MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
{ MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
{ MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
{ MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
{ MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
};
int i;
msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
if (boot_cpu_has(msr_enum[i].feature))
msr_build_context(&msr_enum[i].msr_no, 1);
}
}
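
pm_save_spec_msr() now saves each speculation MSR only when a matching feature bit proves the MSR exists, rather than blindly saving the whole list. A minimal standalone model of the feature-gated loop; the feature probe is stubbed with strings purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msr_enumeration_model {
	uint32_t msr_no;
	const char *feature;   /* stand-in for an X86_FEATURE_* bit */
};

static bool cpu_has_feature(const char *feature)
{
	/* Pretend only the SPEC_CTRL MSR is enumerated on this CPU. */
	return strcmp(feature, "MSR_SPEC_CTRL") == 0;
}

static void save_msr(uint32_t msr_no)
{
	printf("saving MSR 0x%x for suspend/resume\n", (unsigned)msr_no);
}

int main(void)
{
	const struct msr_enumeration_model msrs[] = {
		{ 0x48,       "MSR_SPEC_CTRL" },  /* MSR_IA32_SPEC_CTRL */
		{ 0x122,      "MSR_TSX_CTRL" },   /* MSR_IA32_TSX_CTRL */
		{ 0xc0011029, "LFENCE_RDTSC" },   /* MSR_AMD64_DE_CFG */
	};

	for (size_t i = 0; i < sizeof(msrs) / sizeof(msrs[0]); i++)
		if (cpu_has_feature(msrs[i].feature))
			save_msr(msrs[i].msr_no);
	return 0;
}
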
static int pm_check_save_msr(void)

View File

@@ -298,6 +298,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
*/
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
lockdep_assert_held(&bfqd->lock);
if (bfqd->queued != 0) {
bfq_log(bfqd, "schedule dispatch");
blk_mq_run_hw_queues(bfqd->queue, true);
@@ -4584,8 +4586,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_bfqq_expire(bfqd, bfqq, true, reason);
schedule_dispatch:
spin_unlock_irqrestore(&bfqd->lock, flags);
bfq_schedule_dispatch(bfqd);
spin_unlock_irqrestore(&bfqd->lock, flags);
}
/*

View File

@@ -94,8 +94,8 @@ struct opal_dev {
u64 lowest_lba;
size_t pos;
u8 cmd[IO_BUFFER_LENGTH];
u8 resp[IO_BUFFER_LENGTH];
u8 *cmd;
u8 *resp;
struct parsed_resp parsed;
size_t prev_d_len;
@@ -2011,6 +2011,8 @@ void free_opal_dev(struct opal_dev *dev)
if (!dev)
return;
clean_opal_dev(dev);
kfree(dev->resp);
kfree(dev->cmd);
kfree(dev);
}
EXPORT_SYMBOL(free_opal_dev);
@@ -2023,16 +2025,38 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
if (!dev)
return NULL;
/*
* Presumably DMA-able buffers must be cache-aligned. Kmalloc makes
* sure the allocated buffer is DMA-safe in that regard.
*/
dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
if (!dev->cmd)
goto err_free_dev;
dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
if (!dev->resp)
goto err_free_cmd;
INIT_LIST_HEAD(&dev->unlk_lst);
mutex_init(&dev->dev_lock);
dev->data = data;
dev->send_recv = send_recv;
if (check_opal_support(dev) != 0) {
pr_debug("Opal is not supported on this device\n");
kfree(dev);
return NULL;
goto err_free_resp;
}
return dev;
err_free_resp:
kfree(dev->resp);
err_free_cmd:
kfree(dev->cmd);
err_free_dev:
kfree(dev);
return NULL;
}
EXPORT_SYMBOL(init_opal_dev);

View File

@@ -13,6 +13,7 @@
#include <linux/ratelimit.h>
#include <linux/edac.h>
#include <linux/ras.h>
#include <acpi/ghes.h>
#include <asm/cpu.h>
#include <asm/mce.h>
@@ -141,8 +142,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
int cpu = mce->extcpu;
struct acpi_hest_generic_status *estatus, *tmp;
struct acpi_hest_generic_data *gdata;
const guid_t *fru_id = &guid_null;
char *fru_text = "";
const guid_t *fru_id;
char *fru_text;
guid_t *sec_type;
static u32 err_seq;
@@ -163,17 +164,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
/* log event via trace */
err_seq++;
gdata = (struct acpi_hest_generic_data *)(tmp + 1);
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (guid_t *)gdata->fru_id;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem = (void *)(gdata + 1);
if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
(u8)gdata->error_severity);
apei_estatus_for_each_section(tmp, gdata) {
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (guid_t *)gdata->fru_id;
else
fru_id = &guid_null;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
else
fru_text = "";
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem = (void *)(gdata + 1);
if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
(u8)gdata->error_severity);
}
}
out:

View File

@@ -447,6 +447,70 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
},
},
/*
* More Tongfang devices with the same issue as the Clevo NL5xRU and
* NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description above.
*/
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GKxNRxx"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GKxNRxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GMxNGxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxNGxx"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GMxZGxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxZGxx"),
},
},
{
.callback = video_detect_force_native,
.ident = "TongFang GMxRGxx",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
},
},
/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.

View File

@@ -260,7 +260,7 @@ enum {
ICH_MAP = 0x90, /* ICH MAP register */
/* em constants */
EM_MAX_SLOTS = 8,
EM_MAX_SLOTS = SATA_PMP_MAX_PORTS,
EM_MAX_RETRY = 5,
/* em_ctl bits */

View File

@@ -887,4 +887,4 @@ module_platform_driver(imx_ahci_driver);
MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ahci:imx");
MODULE_ALIAS("platform:" DRV_NAME);

View File

@@ -292,9 +292,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
outb(inb(0x1F4) & 0x07, 0x1F4);
rt = inb(0x1F3);
rt &= 0x07 << (3 * adev->devno);
rt &= ~(0x07 << (3 * !adev->devno));
if (pio)
rt |= (1 + 3 * pio) << (3 * adev->devno);
rt |= (1 + 3 * pio) << (3 * !adev->devno);
outb(rt, 0x1F3);
udelay(100);
outb(inb(0x1F2) | 0x01, 0x1F2);

View File

@@ -656,6 +656,12 @@ ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
return sysfs_emit(buf, "Not affected\n");
}
ssize_t __weak cpu_show_retbleed(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
}
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -666,6 +672,7 @@ static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -678,6 +685,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_itlb_multihit.attr,
&dev_attr_srbds.attr,
&dev_attr_mmio_stale_data.attr,
&dev_attr_retbleed.attr,
NULL
};

View File

@@ -2232,6 +2232,10 @@ static int genpd_iterate_idle_states(struct device_node *dn,
np = it.node;
if (!of_match_node(idle_state_match, np))
continue;
if (!of_device_is_available(np))
continue;
if (states) {
ret = genpd_parse_state(&states[i], np);
if (ret) {

View File

@@ -2804,7 +2804,7 @@ static int init_submitter(struct drbd_device *device)
enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
struct drbd_resource *resource = adm_ctx->resource;
struct drbd_connection *connection;
struct drbd_connection *connection, *n;
struct drbd_device *device;
struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
@@ -2933,7 +2933,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
out_idr_remove_vol:
idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
for_each_connection(connection, resource) {
for_each_connection_safe(connection, n, resource) {
peer_device = idr_remove(&connection->peer_devices, vnr);
if (peer_device)
kref_put(&connection->kref, drbd_destroy_connection);

View File

@@ -268,6 +268,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register);
/* common code that starts a transfer */
static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
{
u32 int_mask, status;
bool timeout;
if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) {
dev_dbg(rsb->dev, "RSB transfer still in progress\n");
return -EBUSY;
@@ -275,13 +278,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
reinit_completion(&rsb->complete);
writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER,
rsb->regs + RSB_INTE);
int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER;
writel(int_mask, rsb->regs + RSB_INTE);
writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB,
rsb->regs + RSB_CTRL);
if (!wait_for_completion_io_timeout(&rsb->complete,
msecs_to_jiffies(100))) {
if (irqs_disabled()) {
timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS,
status, (status & int_mask),
10, 100000);
writel(status, rsb->regs + RSB_INTS);
} else {
timeout = !wait_for_completion_io_timeout(&rsb->complete,
msecs_to_jiffies(100));
status = rsb->status;
}
if (timeout) {
dev_dbg(rsb->dev, "RSB timeout\n");
/* abort the transfer */
@@ -293,18 +306,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb)
return -ETIMEDOUT;
}
if (rsb->status & RSB_INTS_LOAD_BSY) {
if (status & RSB_INTS_LOAD_BSY) {
dev_dbg(rsb->dev, "RSB busy\n");
return -EBUSY;
}
if (rsb->status & RSB_INTS_TRANS_ERR) {
if (rsb->status & RSB_INTS_TRANS_ERR_ACK) {
if (status & RSB_INTS_TRANS_ERR) {
if (status & RSB_INTS_TRANS_ERR_ACK) {
dev_dbg(rsb->dev, "RSB slave nack\n");
return -EINVAL;
}
if (rsb->status & RSB_INTS_TRANS_ERR_DATA) {
if (status & RSB_INTS_TRANS_ERR_DATA) {
dev_dbg(rsb->dev, "RSB transfer data error\n");
return -EIO;
}

View File

@@ -47,6 +47,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");

View File

@@ -20,6 +20,7 @@
#include <asm/msr.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "cpufreq_ondemand.h"

View File

@@ -252,6 +252,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
ATC_SPIP_BOUNDARY(first->boundary));
channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
ATC_DPIP_BOUNDARY(first->boundary));
/* Don't allow CPU to reorder channel enable. */
wmb();
dma_writel(atdma, CHER, atchan->mask);
vdbg_dump_regs(atchan);
@@ -312,7 +314,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
struct at_desc *desc_first = atc_first_active(atchan);
struct at_desc *desc;
int ret;
u32 ctrla, dscr, trials;
u32 ctrla, dscr;
unsigned int i;
/*
* If the cookie doesn't match to the currently running transfer then
@@ -382,7 +385,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
dscr = channel_readl(atchan, DSCR);
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
u32 new_dscr;
rmb(); /* ensure DSCR is read after CTRLA */
@@ -408,7 +411,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
rmb(); /* ensure DSCR is read before CTRLA */
ctrla = channel_readl(atchan, CTRLA);
}
if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
if (unlikely(i == ATC_MAX_DSCR_TRIALS))
return -ETIMEDOUT;
/* for the first descriptor we can be more accurate */
@@ -556,10 +559,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
bad_desc = atc_first_active(atchan);
list_del_init(&bad_desc->desc_node);
/* As we are stopped, take advantage to push queued descriptors
* in active_list */
list_splice_init(&atchan->queue, atchan->active_list.prev);
/* Try to restart the controller */
if (!list_empty(&atchan->active_list))
atc_dostart(atchan, atc_first_active(atchan));
@@ -680,19 +679,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&atchan->lock, flags);
cookie = dma_cookie_assign(tx);
if (list_empty(&atchan->active_list)) {
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
desc->txd.cookie);
atc_dostart(atchan, desc);
list_add_tail(&desc->desc_node, &atchan->active_list);
} else {
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
list_add_tail(&desc->desc_node, &atchan->queue);
}
list_add_tail(&desc->desc_node, &atchan->queue);
spin_unlock_irqrestore(&atchan->lock, flags);
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
desc->txd.cookie);
return cookie;
}
@@ -1962,7 +1953,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
err = dma_async_device_register(&atdma->dma_common);
if (err) {
dev_err(&pdev->dev, "Unable to register: %d.\n", err);
goto err_dma_async_device_register;
}
/*
* Do not return an error if the dmac node is not present in order to
@@ -1982,6 +1977,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
err_dma_async_device_register:
dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
dma_pool_destroy(atdma->dma_desc_pool);

View File

@@ -168,13 +168,13 @@
/* LLI == Linked List Item; aka DMA buffer descriptor */
struct at_lli {
/* values that are not changed by hardware */
dma_addr_t saddr;
dma_addr_t daddr;
u32 saddr;
u32 daddr;
/* value that may get written back: */
u32 ctrla;
u32 ctrla;
/* more values that are not changed by hardware */
u32 ctrlb;
dma_addr_t dscr; /* chain to next lli */
u32 ctrlb;
u32 dscr; /* chain to next lli */
};
/**

View File

@@ -901,6 +901,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
tasklet_kill(&xor_dev->irq_tasklet);
clk_disable_unprepare(xor_dev->clk);
clk_disable_unprepare(xor_dev->reg_clk);
return 0;
}

View File

@@ -532,7 +532,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
seed = early_memremap(efi.rng_seed, sizeof(*seed));
if (seed != NULL) {
-size = seed->size;
+size = min(seed->size, EFI_RANDOM_SEED_SIZE);
early_memunmap(seed, sizeof(*seed));
} else {
pr_err("Could not map UEFI random seed!\n");
@@ -541,7 +541,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
seed = early_memremap(efi.rng_seed,
sizeof(*seed) + size);
if (seed != NULL) {
-add_device_randomness(seed->bits, seed->size);
+add_device_randomness(seed->bits, size);
early_memunmap(seed, sizeof(*seed) + size);
pr_notice("seeding entropy pool\n");
} else {


@@ -310,11 +310,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto release_object;
-if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
-r = amdgpu_mn_register(bo, args->addr);
-if (r)
-goto release_object;
-}
+r = amdgpu_mn_register(bo, args->addr);
+if (r)
+goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
down_read(&current->mm->mmap_sem);


@@ -55,13 +55,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
goto err_unpin_pages;
}
-ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
if (ret)
goto err_free;
src = obj->mm.pages->sgl;
dst = st->sgl;
-for (i = 0; i < obj->mm.pages->nents; i++) {
+for (i = 0; i < obj->mm.pages->orig_nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);


@@ -243,8 +243,9 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int imx_tve_connector_mode_valid(struct drm_connector *connector,
-struct drm_display_mode *mode)
+static enum drm_mode_status
+imx_tve_connector_mode_valid(struct drm_connector *connector,
+struct drm_display_mode *mode)
{
struct imx_tve *tve = con_to_tve(connector);
unsigned long rate;


@@ -291,6 +291,11 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
struct platform_device *pdev = hdmi->pdev;
int ret;
+if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) {
+DRM_DEV_ERROR(dev->dev, "too many bridges\n");
+return -ENOSPC;
+}
hdmi->dev = dev;
hdmi->encoder = encoder;


@@ -71,8 +71,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
-struct drm_display_mode *mode)
+static enum drm_mode_status
+mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
+struct drm_display_mode *mode)
{
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);


@@ -364,7 +364,12 @@ static int __init vc4_drm_register(void)
if (ret)
return ret;
-return platform_driver_register(&vc4_platform_driver);
+ret = platform_driver_register(&vc4_platform_driver);
+if (ret)
+platform_unregister_drivers(component_drivers,
+ARRAY_SIZE(component_drivers));
+return ret;
}
static void __exit vc4_drm_unregister(void)


@@ -500,7 +500,7 @@ static int mousevsc_probe(struct hv_device *device,
ret = hid_add_device(hid_dev);
if (ret)
-goto probe_err1;
+goto probe_err2;
ret = hid_parse(hid_dev);


@@ -343,7 +343,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
magicmouse_raw_event(hdev, report, data + 2, data[1]);
magicmouse_raw_event(hdev, report, data + 2 + data[1],
size - 2 - data[1]);
-break;
+return 0;
default:
return 0;
}


@@ -255,10 +255,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
*/
if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
-if (host_bridge->device == tjmax_pci_table[i].device)
+if (host_bridge->device == tjmax_pci_table[i].device) {
+pci_dev_put(host_bridge);
return tjmax_pci_table[i].tjmax;
+}
}
}
+pci_dev_put(host_bridge);
for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
if (strstr(c->x86_model_id, tjmax_table[i].id))
@@ -531,6 +534,10 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
{
struct temp_data *tdata = pdata->core_data[indx];
+/* if we errored on add then this is already gone */
+if (!tdata)
+return;
/* Remove the sysfs attributes */
sysfs_remove_group(&pdata->hwmon_dev->kobj, &tdata->attr_group);


@@ -117,7 +117,7 @@ static int i5500_temp_probe(struct pci_dev *pdev,
u32 tstimer;
s8 tsfsc;
-err = pci_enable_device(pdev);
+err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Failed to enable device\n");
return err;


@@ -517,6 +517,7 @@ static void ibmpex_register_bmc(int iface, struct device *dev)
return;
out_register:
+list_del(&data->list);
hwmon_device_unregister(data->hwmon_dev);
out_user:
ipmi_destroy_user(data->user);


@@ -900,6 +900,7 @@ static struct platform_driver xiic_i2c_driver = {
module_platform_driver(xiic_i2c_driver);
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");


@@ -58,11 +58,13 @@
#include <linux/tick.h>
#include <trace/events/power.h>
#include <linux/sched.h>
+#include <linux/sched/smt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
+#include <asm/nospec-branch.h>
#include <asm/mwait.h>
#include <asm/msr.h>
@@ -97,6 +99,8 @@ static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+static int intel_idle_ibrs(struct cpuidle_device *dev,
+struct cpuidle_driver *drv, int index);
static void intel_idle_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static struct cpuidle_state *cpuidle_state_table;
@@ -109,6 +113,12 @@ static struct cpuidle_state *cpuidle_state_table;
*/
#define CPUIDLE_FLAG_TLB_FLUSHED 0x10000
+/*
+ * Disable IBRS across idle (when KERNEL_IBRS), is exclusive vs IRQ_ENABLE
+ * above.
+ */
+#define CPUIDLE_FLAG_IBRS BIT(16)
/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
@@ -617,7 +627,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C6",
.desc = "MWAIT 0x20",
-.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 85,
.target_residency = 200,
.enter = &intel_idle,
@@ -625,7 +635,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C7s",
.desc = "MWAIT 0x33",
-.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 124,
.target_residency = 800,
.enter = &intel_idle,
@@ -633,7 +643,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C8",
.desc = "MWAIT 0x40",
-.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
@@ -641,7 +651,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C9",
.desc = "MWAIT 0x50",
-.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 480,
.target_residency = 5000,
.enter = &intel_idle,
@@ -649,7 +659,7 @@ static struct cpuidle_state skl_cstates[] = {
{
.name = "C10",
.desc = "MWAIT 0x60",
-.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 890,
.target_residency = 5000,
.enter = &intel_idle,
@@ -678,7 +688,7 @@ static struct cpuidle_state skx_cstates[] = {
{
.name = "C6",
.desc = "MWAIT 0x20",
-.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 133,
.target_residency = 600,
.enter = &intel_idle,
@@ -935,6 +945,24 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
return index;
}
+static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
+struct cpuidle_driver *drv, int index)
+{
+bool smt_active = sched_smt_active();
+u64 spec_ctrl = spec_ctrl_current();
+int ret;
+if (smt_active)
+wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ret = intel_idle(dev, drv, index);
+if (smt_active)
+wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
+return ret;
+}
/**
* intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle
* @dev: cpuidle_device
@@ -1375,6 +1403,11 @@ static void __init intel_idle_cpuidle_driver_init(void)
mark_tsc_unstable("TSC halts in idle"
" states deeper than C2");
+if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
+cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
+drv->states[drv->state_count].enter = intel_idle_ibrs;
+}
drv->states[drv->state_count] = /* structure copy */
cpuidle_state_table[cstate];


@@ -618,8 +618,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
trig->ops = &at91_adc_trigger_ops;
ret = iio_trigger_register(trig);
-if (ret)
+if (ret) {
+iio_trigger_free(trig);
return NULL;
+}
return trig;
}


@@ -253,14 +253,14 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4403_data *afe = iio_priv(indio_dev);
-unsigned int reg = afe4403_channel_values[chan->address];
-unsigned int field = afe4403_channel_leds[chan->address];
+unsigned int reg, field;
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+reg = afe4403_channel_values[chan->address];
ret = afe4403_read(afe, reg, val);
if (ret)
return ret;
@@ -270,6 +270,7 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+field = afe4403_channel_leds[chan->address];
ret = regmap_field_read(afe->fields[field], val);
if (ret)
return ret;


@@ -258,20 +258,20 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
-unsigned int value_reg = afe4404_channel_values[chan->address];
-unsigned int led_field = afe4404_channel_leds[chan->address];
-unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
+unsigned int value_reg, led_field, offdac_field;
int ret;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+value_reg = afe4404_channel_values[chan->address];
ret = regmap_read(afe->regmap, value_reg, val);
if (ret)
return ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
+offdac_field = afe4404_channel_offdacs[chan->address];
ret = regmap_field_read(afe->fields[offdac_field], val);
if (ret)
return ret;
@@ -281,6 +281,7 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+led_field = afe4404_channel_leds[chan->address];
ret = regmap_field_read(afe->fields[led_field], val);
if (ret)
return ret;
@@ -303,19 +304,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct afe4404_data *afe = iio_priv(indio_dev);
-unsigned int led_field = afe4404_channel_leds[chan->address];
-unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
+unsigned int led_field, offdac_field;
switch (chan->type) {
case IIO_INTENSITY:
switch (mask) {
case IIO_CHAN_INFO_OFFSET:
+offdac_field = afe4404_channel_offdacs[chan->address];
return regmap_field_write(afe->fields[offdac_field], val);
}
break;
case IIO_CURRENT:
switch (mask) {
case IIO_CHAN_INFO_RAW:
+led_field = afe4404_channel_leds[chan->address];
return regmap_field_write(afe->fields[led_field], val);
}
break;


@@ -61,8 +61,12 @@ int iio_register_sw_trigger_type(struct iio_sw_trigger_type *t)
t->group = configfs_register_default_group(iio_triggers_group, t->name,
&iio_trigger_type_group_type);
-if (IS_ERR(t->group))
+if (IS_ERR(t->group)) {
+mutex_lock(&iio_trigger_types_lock);
+list_del(&t->list);
+mutex_unlock(&iio_trigger_types_lock);
ret = PTR_ERR(t->group);
+}
return ret;
}


@@ -237,6 +237,8 @@ config RPR0521
tristate "ROHM RPR0521 ALS and proximity sensor driver"
depends on I2C
select REGMAP_I2C
+select IIO_BUFFER
+select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build support for ROHM's RPR0521
ambient light and proximity sensor device.


@@ -63,9 +63,6 @@
#define APDS9960_REG_CONTROL_PGAIN_MASK_SHIFT 2
#define APDS9960_REG_CONFIG_2 0x90
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK 0x60
-#define APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT 5
#define APDS9960_REG_ID 0x92
#define APDS9960_REG_STATUS 0x93
@@ -86,6 +83,9 @@
#define APDS9960_REG_GCONF_1_GFIFO_THRES_MASK_SHIFT 6
#define APDS9960_REG_GCONF_2 0xa3
+#define APDS9960_REG_GCONF_2_GGAIN_MASK 0x60
+#define APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT 5
#define APDS9960_REG_GOFFSET_U 0xa4
#define APDS9960_REG_GOFFSET_D 0xa5
#define APDS9960_REG_GPULSE 0xa6
@@ -404,9 +404,9 @@ static int apds9960_set_pxs_gain(struct apds9960_data *data, int val)
}
ret = regmap_update_bits(data->regmap,
-APDS9960_REG_CONFIG_2,
-APDS9960_REG_CONFIG_2_GGAIN_MASK,
-idx << APDS9960_REG_CONFIG_2_GGAIN_MASK_SHIFT);
+APDS9960_REG_GCONF_2,
+APDS9960_REG_GCONF_2_GGAIN_MASK,
+idx << APDS9960_REG_GCONF_2_GGAIN_MASK_SHIFT);
if (!ret)
data->pxs_gain = idx;
mutex_unlock(&data->lock);


@@ -867,7 +867,7 @@ static int tsl2583_probe(struct i2c_client *clientp,
TSL2583_POWER_OFF_DELAY_MS);
pm_runtime_use_autosuspend(&clientp->dev);
-ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&clientp->dev, "%s: iio registration failed\n",
__func__);


@@ -95,7 +95,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
spi->mode = SPI_MODE_0;
-spi->max_speed_hz = 20000000;
+spi->max_speed_hz = min(spi->max_speed_hz, 20000000U);
spi->bits_per_word = 8;
ret = spi_setup(spi);
if (ret < 0)


@@ -212,9 +212,13 @@ static int iio_sysfs_trigger_remove(int id)
static int __init iio_sysfs_trig_init(void)
{
+int ret;
device_initialize(&iio_sysfs_trig_dev);
dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
-return device_add(&iio_sysfs_trig_dev);
+ret = device_add(&iio_sysfs_trig_dev);
+if (ret)
+put_device(&iio_sysfs_trig_dev);
+return ret;
}
module_init(iio_sysfs_trig_init);


@@ -192,6 +192,7 @@ static const char * const smbus_pnp_ids[] = {
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
"SYN3286", /* HP Laptop 15-da3001TU */
NULL
};


@@ -804,6 +804,7 @@ int __init dmar_dev_scope_init(void)
info = dmar_alloc_pci_notify_info(dev,
BUS_NOTIFY_ADD_DEVICE);
if (!info) {
+pci_dev_put(dev);
return dmar_dev_scope_status;
} else {
dmar_pci_bus_add_dev(info);


@@ -2785,6 +2785,7 @@ static int __init si_domain_init(int hw)
if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
+si_domain = NULL;
return -EFAULT;
}
@@ -3475,6 +3476,10 @@ free_iommu:
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
+if (si_domain) {
+domain_exit(si_domain);
+si_domain = NULL;
+}
kfree(g_iommus);


@@ -970,7 +970,7 @@ nj_release(struct tiger_hw *card)
}
if (card->irq > 0)
free_irq(card->irq, card);
-if (card->isac.dch.dev.dev.class)
+if (device_is_registered(&card->isac.dch.dev.dev))
mISDN_unregister_device(&card->isac.dch.dev);
for (i = 0; i < 2; i++) {


@@ -231,7 +231,7 @@ mISDN_register_device(struct mISDNdevice *dev,
err = get_free_devid();
if (err < 0)
-goto error1;
+return err;
dev->id = err;
device_initialize(&dev->dev);
@@ -242,11 +242,12 @@ mISDN_register_device(struct mISDNdevice *dev,
if (debug & DEBUG_CORE)
printk(KERN_DEBUG "mISDN_register %s %d\n",
dev_name(&dev->dev), dev->id);
+dev->dev.class = &mISDN_class;
err = create_stack(dev);
if (err)
goto error1;
-dev->dev.class = &mISDN_class;
dev->dev.platform_data = dev;
dev->dev.parent = parent;
dev_set_drvdata(&dev->dev, dev);
@@ -258,8 +259,8 @@ mISDN_register_device(struct mISDNdevice *dev,
error3:
delete_stack(dev);
-return err;
error1:
+put_device(&dev->dev);
return err;
}


@@ -97,6 +97,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
if (!entry)
return -ENOMEM;
+INIT_LIST_HEAD(&entry->list);
entry->elem = elem;
entry->dev.class = elements_class;
@@ -131,7 +132,7 @@ err2:
device_unregister(&entry->dev);
return ret;
err1:
-kfree(entry);
+put_device(&entry->dev);
return ret;
}
EXPORT_SYMBOL(mISDN_dsp_element_register);


@@ -573,7 +573,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param)
size_t *needed = needed_param;
*needed += sizeof(struct dm_target_versions);
-*needed += strlen(tt->name);
+*needed += strlen(tt->name) + 1;
*needed += ALIGN_MASK;
}
@@ -628,7 +628,7 @@ static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param
iter_info.old_vers = NULL;
iter_info.vers = vers;
iter_info.flags = 0;
-iter_info.end = (char *)vers+len;
+iter_info.end = (char *)vers + needed;
/*
* Now loop through filling out the names & versions.


@@ -6700,7 +6700,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct drxk_state *state = fe->demodulator_priv;
-u16 err;
+u16 err = 0;
dprintk(1, "\n");


@@ -116,6 +116,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n");
cec->rx = STATE_BUSY;
cec->msg.len = status >> 24;
+if (cec->msg.len > CEC_MAX_MSG_SIZE)
+cec->msg.len = CEC_MAX_MSG_SIZE;
cec->msg.rx_status = CEC_RX_STATUS_OK;
s5p_cec_get_rx_buf(cec, cec->msg.len,
cec->msg.msg);


@@ -309,6 +309,28 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a
return vivid_vid_out_g_fbuf(file, fh, a);
}
+/*
+ * Only support the framebuffer of one of the vivid instances.
+ * Anything else is rejected.
+ */
+bool vivid_validate_fb(const struct v4l2_framebuffer *a)
+{
+struct vivid_dev *dev;
+int i;
+for (i = 0; i < n_devs; i++) {
+dev = vivid_devs[i];
+if (!dev || !dev->video_pbase)
+continue;
+if ((unsigned long)a->base == dev->video_pbase &&
+a->fmt.width <= dev->display_width &&
+a->fmt.height <= dev->display_height &&
+a->fmt.bytesperline <= dev->display_byte_stride)
+return true;
+}
+return false;
+}
static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
{
struct video_device *vdev = video_devdata(file);


@@ -562,4 +562,6 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev)
return dev->output_type[dev->output] == HDMI;
}
+bool vivid_validate_fb(const struct v4l2_framebuffer *a);
#endif


@@ -458,6 +458,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
dev->crop_cap = dev->src_rect;
dev->crop_bounds_cap = dev->src_rect;
+if (dev->bitmap_cap &&
+(dev->compose_cap.width != dev->crop_cap.width ||
+dev->compose_cap.height != dev->crop_cap.height)) {
+vfree(dev->bitmap_cap);
+dev->bitmap_cap = NULL;
+}
dev->compose_cap = dev->crop_cap;
if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
dev->compose_cap.height /= 2;
@@ -886,6 +892,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_rect *crop = &dev->crop_cap;
struct v4l2_rect *compose = &dev->compose_cap;
+unsigned orig_compose_w = compose->width;
+unsigned orig_compose_h = compose->height;
unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
int ret;
@@ -1002,17 +1010,17 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
s->r.height /= factor;
}
v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
-if (dev->bitmap_cap && (compose->width != s->r.width ||
-compose->height != s->r.height)) {
-vfree(dev->bitmap_cap);
-dev->bitmap_cap = NULL;
-}
*compose = s->r;
break;
default:
return -EINVAL;
}
+if (dev->bitmap_cap && (compose->width != orig_compose_w ||
+compose->height != orig_compose_h)) {
+vfree(dev->bitmap_cap);
+dev->bitmap_cap = NULL;
+}
tpg_s_crop_compose(&dev->tpg, crop, compose);
return 0;
}
@@ -1255,7 +1263,14 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
return -EINVAL;
if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
return -EINVAL;
-if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
+if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height)
return -EINVAL;
+/*
+ * Only support the framebuffer of one of the vivid instances.
+ * Anything else is rejected.
+ */
+if (!vivid_validate_fb(a))
+return -EINVAL;
dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);


@@ -172,6 +172,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
(bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
(!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
return false;
+/* sanity checks for the blanking timings */
+if (!bt->interlaced &&
+(bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
+return false;
+if (bt->hfrontporch > 2 * bt->width ||
+bt->hsync > 1024 || bt->hbackporch > 1024)
+return false;
+if (bt->vfrontporch > 4096 ||
+bt->vsync > 128 || bt->vbackporch > 4096)
+return false;
+if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
+bt->il_vsync > 128 || bt->il_vbackporch > 4096))
+return false;
return fnc == NULL || fnc(t, fnc_handle);
}
EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
