kernel_xiaomi_raphael/kernel/smp.c
Blagovest Kolenichev f1b30ef999 Merge android-4.14-p.99 (b952da4) into msm-4.14
* refs/heads/tmp-b952da4:
  Revert "iommu/arm-smmu: Add support for qcom,smmu-v2 variant"
  Linux 4.14.99
  ath9k: dynack: check da->enabled first in sampling routines
  ath9k: dynack: make ewma estimation faster
  perf/x86/intel: Delay memory deallocation until x86_pmu_dead_cpu()
  IB/hfi1: Add limit test for RC/UC send via loopback
  nfsd4: catch some false session retries
  nfsd4: fix cached replies to solo SEQUENCE compounds
  serial: 8250_pci: Make PCI class test non fatal
  serial: fix race between flush_to_ldisc and tty_open
  perf tests evsel-tp-sched: Fix bitwise operator
  perf/core: Don't WARN() for impossible ring-buffer sizes
  x86/MCE: Initialize mce.bank in the case of a fatal error in mce_no_way_out()
  perf/x86/intel/uncore: Add Node ID mask
  cpu/hotplug: Fix "SMT disabled by BIOS" detection for KVM
  KVM: nVMX: unconditionally cancel preemption timer in free_nested (CVE-2019-7221)
  kvm: fix kvm_ioctl_create_device() reference counting (CVE-2019-6974)
  KVM: x86: work around leak of uninitialized stack contents (CVE-2019-7222)
  scsi: aic94xx: fix module loading
  scsi: cxlflash: Prevent deadlock when adapter probe fails
  staging: speakup: fix tty-operation NULL derefs
  usb: gadget: musb: fix short isoc packets with inventra dma
  usb: gadget: udc: net2272: Fix bitwise and boolean operations
  usb: dwc3: gadget: Handle 0 xfer length for OUT EP
  usb: phy: am335x: fix race condition in _probe
  irqchip/gic-v3-its: Plug allocation race for devices sharing a DevID
  futex: Handle early deadlock return correctly
  dmaengine: imx-dma: fix wrong callback invoke
  dmaengine: bcm2835: Fix abort of transactions
  dmaengine: bcm2835: Fix interrupt race on RT
  fuse: handle zero sized retrieve correctly
  fuse: decrement NR_WRITEBACK_TEMP on the right page
  fuse: call pipe_buf_release() under pipe lock
  ALSA: hda - Serialize codec registrations
  ALSA: compress: Fix stop handling on compressed capture streams
  net: dsa: slave: Don't propagate flag changes on down slave interfaces
  net/mlx5e: Force CHECKSUM_UNNECESSARY for short ethernet frames
  net: systemport: Fix WoL with password after deep sleep
  rds: fix refcount bug in rds_sock_addref
  skge: potential memory corruption in skge_get_regs()
  rxrpc: bad unlock balance in rxrpc_recvmsg
  net: dp83640: expire old TX-skb
  enic: fix checksum validation for IPv6
  dccp: fool proof ccid_hc_[rt]x_parse_options()
  thermal: hwmon: inline helpers when CONFIG_THERMAL_HWMON is not set
  scripts/gdb: fix lx-version string output
  exec: load_script: don't blindly truncate shebang string
  fs/epoll: drop ovflist branch prediction
  kernel/hung_task.c: force console verbose before panic
  proc/sysctl: fix return error for proc_doulongvec_minmax()
  kernel/hung_task.c: break RCU locks based on jiffies
  HID: lenovo: Add checks to fix of_led_classdev_register
  thermal: generic-adc: Fix adc to temp interpolation
  kdb: Don't back trace on a cpu that didn't round up
  thermal: bcm2835: enable hwmon explicitly
  block/swim3: Fix -EBUSY error when re-opening device after unmount
  fsl/fman: Use GFP_ATOMIC in {memac,tgec}_add_hash_mac_address()
  gdrom: fix a memory leak bug
  isdn: hisax: hfc_pci: Fix a possible concurrency use-after-free bug in HFCPCI_l1hw()
  ocfs2: improve ocfs2 Makefile
  ocfs2: don't clear bh uptodate for block read
  scripts/decode_stacktrace: only strip base path when a prefix of the path
  cgroup: fix parsing empty mount option string
  f2fs: fix sbi->extent_list corruption issue
  niu: fix missing checks of niu_pci_eeprom_read
  um: Avoid marking pages with "changed protection"
  cifs: check ntwrk_buf_start for NULL before dereferencing it
  MIPS: ralink: Select CONFIG_CPU_MIPSR2_IRQ_VI on MT7620/8
  crypto: ux500 - Use proper enum in hash_set_dma_transfer
  crypto: ux500 - Use proper enum in cryp_set_dma_transfer
  seq_buf: Make seq_buf_puts() null-terminate the buffer
  hwmon: (lm80) fix a missing check of bus read in lm80 probe
  hwmon: (lm80) fix a missing check of the status of SMBus read
  NFS: nfs_compare_mount_options always compare auth flavors.
  kvm: Change offset in kvm_write_guest_offset_cached to unsigned
  powerpc/fadump: Do not allow hot-remove memory from fadump reserved area.
  KVM: x86: svm: report MSR_IA32_MCG_EXT_CTL as unsupported
  pinctrl: meson: meson8b: fix the GPIO function for the GPIOAO pins
  pinctrl: meson: meson8: fix the GPIO function for the GPIOAO pins
  powerpc/mm: Fix reporting of kernel execute faults on the 8xx
  fbdev: fbcon: Fix unregister crash when more than one framebuffer
  ACPI/APEI: Clear GHES block_status before panic()
  igb: Fix an issue that PME is not enabled during runtime suspend
  i40e: define proper net_device::neigh_priv_len
  fbdev: fbmem: behave better with small rotated displays and many CPUs
  md: fix raid10 hang issue caused by barrier
  video: clps711x-fb: release disp device node in probe()
  drbd: Avoid Clang warning about pointless switch statment
  drbd: skip spurious timeout (ping-timeo) when failing promote
  drbd: disconnect, if the wrong UUIDs are attached on a connected peer
  drbd: narrow rcu_read_lock in drbd_sync_handshake
  powerpc/perf: Fix thresholding counter data for unknown type
  cw1200: Fix concurrency use-after-free bugs in cw1200_hw_scan()
  scsi: smartpqi: increase fw status register read timeout
  scsi: smartpqi: correct volume status
  scsi: smartpqi: correct host serial num for ssa
  mlxsw: spectrum: Properly cleanup LAG uppers when removing port from LAG
  Bluetooth: Fix unnecessary error message for HCI request completion
  xfrm6_tunnel: Fix spi check in __xfrm6_tunnel_alloc_spi
  mac80211: fix radiotap vendor presence bitmap handling
  powerpc/uaccess: fix warning/error with access_ok()
  percpu: convert spin_lock_irq to spin_lock_irqsave.
  usb: musb: dsps: fix otg state machine
  arm64: KVM: Skip MMIO insn after emulation
  perf probe: Fix unchecked usage of strncpy()
  perf header: Fix unchecked usage of strncpy()
  perf test: Fix perf_event_attr test failure
  tty: serial: samsung: Properly set flags in autoCTS mode
  mmc: sdhci-xenon: Fix timeout checks
  mmc: sdhci-of-esdhc: Fix timeout checks
  memstick: Prevent memstick host from getting runtime suspended during card detection
  mmc: bcm2835: reset host on timeout
  mmc: bcm2835: Recover from MMC_SEND_EXT_CSD
  KVM: PPC: Book3S: Only report KVM_CAP_SPAPR_TCE_VFIO on powernv machines
  ASoC: fsl: Fix SND_SOC_EUKREA_TLV320 build error on i.MX8M
  ARM: pxa: avoid section mismatch warning
  selftests/bpf: use __bpf_constant_htons in test_prog.c
  switchtec: Fix SWITCHTEC_IOCTL_EVENT_IDX_ALL flags overwrite
  udf: Fix BUG on corrupted inode
  phy: sun4i-usb: add support for missing USB PHY index
  i2c-axxia: check for error conditions first
  OPP: Use opp_table->regulators to verify no regulator case
  cpuidle: big.LITTLE: fix refcount leak
  clk: imx6sl: ensure MMDC CH0 handshake is bypassed
  sata_rcar: fix deferred probing
  iommu/arm-smmu-v3: Use explicit mb() when moving cons pointer
  iommu/arm-smmu: Add support for qcom,smmu-v2 variant
  usb: dwc3: gadget: Disable CSP for stream OUT ep
  watchdog: renesas_wdt: don't set divider while watchdog is running
  ARM: dts: Fix up the D-Link DIR-685 MTD partition info
  media: coda: fix H.264 deblocking filter controls
  mips: bpf: fix encoding bug for mm_srlv32_op
  ARM: dts: Fix OMAP4430 SDP Ethernet startup
  iommu/amd: Fix amd_iommu=force_isolation
  pinctrl: sx150x: handle failure case of devm_kstrdup
  usb: dwc3: trace: add missing break statement to make compiler happy
  IB/hfi1: Unreserve a reserved request when it is completed
  kobject: return error code if writing /sys/.../uevent fails
  driver core: Move async_synchronize_full call
  clk: sunxi-ng: a33: Set CLK_SET_RATE_PARENT for all audio module clocks
  usb: mtu3: fix the issue about SetFeature(U1/U2_Enable)
  timekeeping: Use proper seqcount initializer
  usb: hub: delay hub autosuspend if USB3 port is still link training
  usb: dwc3: Correct the logic for checking TRB full in __dwc3_prepare_one_trb()
  smack: fix access permissions for keyring
  media: DaVinci-VPBE: fix error handling in vpbe_initialize()
  x86/fpu: Add might_fault() to user_insn()
  ARM: dts: mmp2: fix TWSI2
  arm64: ftrace: don't adjust the LR value
  s390/zcrypt: improve special ap message cmd handling
  firmware/efi: Add NULL pointer checks in efivars API functions
  Thermal: do not clear passive state during system sleep
  arm64: io: Ensure value passed to __iormb() is held in a 64-bit register
  drm: Clear state->acquire_ctx before leaving drm_atomic_helper_commit_duplicated_state()
  nfsd4: fix crash on writing v4_end_grace before nfsd startup
  soc: bcm: brcmstb: Don't leak device tree node reference
  sunvdc: Do not spin in an infinite loop when vio_ldc_send() returns EAGAIN
  arm64: io: Ensure calls to delay routines are ordered against prior readX()
  i2c: sh_mobile: add support for r8a77990 (R-Car E3)
  f2fs: fix wrong return value of f2fs_acl_create
  f2fs: fix race between write_checkpoint and write_begin
  f2fs: move dir data flush to write checkpoint process
  staging: pi433: fix potential null dereference
  ACPI: SPCR: Consider baud rate 0 as preconfigured state
  media: adv*/tc358743/ths8200: fill in min width/height/pixelclock
  iio: accel: kxcjk1013: Add KIOX010A ACPI Hardware-ID
  iio: adc: meson-saradc: fix internal clock names
  iio: adc: meson-saradc: check for devm_kasprintf failure
  dmaengine: xilinx_dma: Remove __aligned attribute on zynqmp_dma_desc_ll
  ptp: Fix pass zero to ERR_PTR() in ptp_clock_register
  media: mtk-vcodec: Release device nodes in mtk_vcodec_init_enc_pm()
  soc/tegra: Don't leak device tree node reference
  perf tools: Add Hygon Dhyana support
  modpost: validate symbol names also in find_elf_symbol
  net/mlx5: EQ, Use the right place to store/read IRQ affinity hint
  ARM: OMAP2+: hwmod: Fix some section annotations
  drm/rockchip: fix for mailbox read size
  usbnet: smsc95xx: fix rx packet alignment
  staging: iio: ad7780: update voltage on read
  platform/chrome: don't report EC_MKBP_EVENT_SENSOR_FIFO as wakeup
  Tools: hv: kvp: Fix a warning of buffer overflow with gcc 8.0.1
  fpga: altera-cvp: Fix registration for CvP incapable devices
  staging:iio:ad2s90: Make probe handle spi_setup failure
  MIPS: Boston: Disable EG20T prefetch
  ptp: check gettime64 return code in PTP_SYS_OFFSET ioctl
  serial: fsl_lpuart: clear parity enable bit when disable parity
  drm/vc4: ->x_scaling[1] should never be set to VC4_SCALING_NONE
  crypto: aes_ti - disable interrupts while accessing S-box
  powerpc/pseries: add of_node_put() in dlpar_detach_node()
  x86/PCI: Fix Broadcom CNB20LE unintended sign extension (redux)
  dlm: Don't swamp the CPU with callbacks queued during recovery
  clk: boston: fix possible memory leak in clk_boston_setup()
  ARM: 8808/1: kexec:offline panic_smp_self_stop CPU
  scsi: lpfc: Fix LOGO/PLOGI handling when triggerd by ABTS Timeout event
  scsi: mpt3sas: Call sas_remove_host before removing the target devices
  scsi: lpfc: Correct LCB RJT handling
  ath9k: dynack: use authentication messages for 'late' ack
  gpu: ipu-v3: image-convert: Prevent race between run and unprepare
  ASoC: Intel: mrfld: fix uninitialized variable access
  pinctrl: bcm2835: Use raw spinlock for RT compatibility
  drm/vgem: Fix vgem_init to get drm device available.
  staging: iio: adc: ad7280a: handle error from __ad7280_read32()
  drm/bufs: Fix Spectre v1 vulnerability

Conflicts:
	drivers/thermal/thermal_core.c

File below is aligned according to change [1] from this LTS import:

    arch/arm64/include/asm/io.h

[1] arm64: io: Ensure calls to delay routines are ordered against prior readX()

Change-Id: Ieae90a5ca7b81b08fcfedb150da732f5986aefe5
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
2019-03-01 11:14:23 -08:00


/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/suspend.h>

#include "smpboot.h"

enum {
        CSD_FLAG_LOCK           = 0x01,
        CSD_FLAG_SYNCHRONOUS    = 0x02,
};

struct call_function_data {
        call_single_data_t      __percpu *csd;
        cpumask_var_t           cpumask;
        cpumask_var_t           cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

/* CPU mask indicating which CPUs to bring online during smp_init() */
static bool have_boot_cpu_mask;
static cpumask_var_t boot_cpu_mask;
int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
                                     cpu_to_node(cpu))) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }
        cfd->csd = alloc_percpu(call_single_data_t);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                return -ENOMEM;
        }

        return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_cpumask_var(cfd->cpumask_ipi);
        free_percpu(cfd->csd);
        return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for the smp-call-function callbacks queued by other
         * CPUs might arrive late, either due to hardware latencies or
         * because this CPU disabled interrupts (inside stop-machine)
         * before the IPIs were sent. So flush out any pending callbacks
         * explicitly (without waiting for the IPIs to arrive), to
         * ensure that the outgoing CPU doesn't go offline with work
         * still pending.
         */
        flush_smp_call_function_queue(false);
        return 0;
}

void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
        smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data_t structure:
         */
        smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
                               smp_call_func_t func, void *info)
{
        if (cpu == smp_processor_id()) {
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU.
                 */
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        csd->func = func;
        csd->info = info;

        /*
         * The list addition should be visible before sending the IPI
         * handler locks the list to pull the entry off it because of
         * normal cache coherency rules implied by spinlocks.
         *
         * If IPIs can go out of order with respect to the cache coherency
         * protocol in an architecture, sufficient synchronisation should be
         * added to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);

        return 0;
}
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        struct llist_head *head;
        struct llist_node *entry;
        call_single_data_t *csd, *csd_next;
        static bool warned;

        WARN_ON(!irqs_disabled());

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, llist)
                        pr_warn("IPI callback %pS sent to offline CPU\n",
                                csd->func);
        }

        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;

                /* Do we wait until *after* callback? */
                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
                        func(info);
                        csd_unlock(csd);
                } else {
                        csd_unlock(csd);
                        func(info);
                }
        }

        /*
         * Handle irq works queued remotely by irq_work_queue_on().
         * Smp functions above are typically synchronous so they
         * better run first since some other CPUs may be busy waiting
         * for them.
         */
        irq_work_run();
}
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        call_single_data_t *csd;
        call_single_data_t csd_stack = {
                .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
        };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this CPU and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        err = generic_exec_single(cpu, csd, func, info);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
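
/*
 * Illustrative sketch (not part of the original file; all example_* names
 * are hypothetical): a typical synchronous use of smp_call_function_single().
 * The callback runs with interrupts disabled on the target CPU, so it must
 * be fast and must not sleep.
 */
static void example_read_cpu_id(void *info)
{
        /* Runs on the target CPU; record which CPU actually executed us. */
        *(int *)info = smp_processor_id();
}

static int __maybe_unused example_sample_cpu(int cpu)
{
        int ran_on = -1;
        int err;

        /* wait=1: only return once the callback has completed on @cpu. */
        err = smp_call_function_single(cpu, example_read_cpu_id, &ran_on, 1);
        if (!err)
                pr_info("callback ran on CPU%d\n", ran_on);
        return err;
}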
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 * specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
        int err = 0;

        preempt_disable();

        /* We could deadlock if we have to wait here with interrupts disabled! */
        if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
                csd_lock_wait(csd);

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd, csd->func, csd->info);
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
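
/*
 * Illustrative sketch (hypothetical names): the async variant needs a
 * caller-owned csd that stays valid until the callback has run and
 * unlocked it; a per-CPU csd is a common pattern. Since this never
 * waits, it is safe to call with interrupts disabled.
 */
static DEFINE_PER_CPU(call_single_data_t, example_csd);

static void example_async_fn(void *info)
{
        pr_info("async callback on CPU%d\n", smp_processor_id());
}

static void __maybe_unused example_poke_cpu(int cpu)
{
        call_single_data_t *csd = &per_cpu(example_csd, cpu);

        /* ->func and ->info must be set before submitting the csd. */
        csd->func = example_async_fn;
        csd->info = NULL;
        smp_call_function_single_async(cpu, csd);
}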
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
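
/*
 * Illustrative sketch (hypothetical names): when any CPU from a set will
 * do, smp_call_function_any() avoids a cross-node IPI where possible by
 * preferring the current CPU, then a same-node CPU.
 */
static void example_drain(void *info)
{
        /* e.g. drain a queue owned by this group of CPUs; must not block */
}

static int __maybe_unused example_drain_on_any(const struct cpumask *group)
{
        /* wait=1: returns after example_drain() has run on the chosen CPU. */
        return smp_call_function_any(group, example_drain, NULL, 1);
}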
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send an smp call function interrupt to this CPU and as such
         * deadlocks can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        __cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        cpumask_clear(cfd->cpumask_ipi);
        for_each_cpu(cpu, cfd->cpumask) {
                call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
                csd->func = func;
                csd->info = info;
                if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                        __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
EXPORT_SYMBOL(smp_call_function_many);
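
/*
 * Illustrative sketch (hypothetical names): smp_call_function_many()
 * requires preemption disabled and skips the calling CPU, so callers
 * that also need local execution invoke the function directly (this is
 * what on_each_cpu_mask() below does).
 */
static void example_invalidate(void *info)
{
        /* per-CPU work such as flushing a local cache; fast, non-blocking */
}

static void __maybe_unused example_invalidate_others(const struct cpumask *mask)
{
        preempt_disable();      /* required by smp_call_function_many() */
        smp_call_function_many(mask, example_invalidate, NULL, true);
        preempt_enable();
}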
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
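
/*
 * Illustrative sketch (hypothetical names): a synchronous broadcast to
 * every other online CPU; kick_all_cpus_sync() further down is a real
 * in-file user of this pattern.
 */
static void example_barrier_fn(void *unused)
{
        /* nothing to do; the callback running is the synchronization */
}

static void __maybe_unused example_global_sync(void)
{
        /* Returns once every other online CPU has run the callback. */
        smp_call_function(example_barrier_fn, NULL, 1);
}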
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

static int __init boot_cpus(char *str)
{
        alloc_bootmem_cpumask_var(&boot_cpu_mask);
        if (cpulist_parse(str, boot_cpu_mask) < 0) {
                pr_warn("SMP: Incorrect boot_cpus cpumask\n");
                return -EINVAL;
        }
        have_boot_cpu_mask = true;
        return 0;
}

early_param("boot_cpus", boot_cpus);
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

static inline bool boot_cpu(int cpu)
{
        if (!have_boot_cpu_mask)
                return true;

        return cpumask_test_cpu(cpu, boot_cpu_mask);
}

static inline void free_boot_cpu_mask(void)
{
        if (have_boot_cpu_mask) /* Allocated from boot_cpus() */
                free_bootmem_cpumask_var(boot_cpu_mask);
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        int num_nodes, num_cpus;
        unsigned int cpu;

        idle_threads_init();
        cpuhp_threads_init();

        pr_info("Bringing up secondary CPUs ...\n");

        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu) && boot_cpu(cpu))
                        cpu_up(cpu);
        }

        free_boot_cpu_mask();

        num_nodes = num_online_nodes();
        num_cpus = num_online_cpus();
        pr_info("Brought up %d node%s, %d CPU%s\n",
                num_nodes, (num_nodes > 1 ? "s" : ""),
                num_cpus, (num_cpus > 1 ? "s" : ""));

        /* Any cleanup work */
        smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
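
/*
 * Illustrative sketch (hypothetical names): on_each_cpu() includes the
 * calling CPU, unlike smp_call_function(). Here every online CPU bumps
 * a shared counter.
 */
static void example_count_cpu(void *info)
{
        atomic_inc((atomic_t *)info);
}

static void __maybe_unused example_count_online(void)
{
        atomic_t seen = ATOMIC_INIT(0);

        /* wait=1, so the on-stack counter is safe to read afterwards. */
        on_each_cpu(example_count_cpu, &seen, 1);
        pr_info("%d CPUs ran the callback\n", atomic_read(&seen));
}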
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                      void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
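
/*
 * Illustrative sketch (hypothetical names): restricting the callback to
 * the CPUs of one NUMA node. The local CPU is covered when it is in the
 * mask, which is why no separate local call is needed.
 */
static void example_tune(void *info)
{
        /* per-CPU tuning, e.g. writing a CPU-local register */
}

static void __maybe_unused example_tune_node(int node)
{
        on_each_cpu_mask(cpumask_of_node(node), example_tune, NULL, true);
}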
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 * @gfp_flags:  GFP flags to use when allocating the cpumask
 *              used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                      smp_call_func_t func, void *info, bool wait,
                      gfp_t gfp_flags)
{
        cpumask_var_t cpus;
        int cpu, ret;

        might_sleep_if(gfpflags_allow_blocking(gfp_flags));

        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info))
                                cpumask_set_cpu(cpu, cpus);
                on_each_cpu_mask(cpus, func, info, wait);
                preempt_enable();
                free_cpumask_var(cpus);
        } else {
                /*
                 * No free cpumask, bother. No matter, we'll
                 * just have to IPI them one by one.
                 */
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                               info, wait);
                                WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
}
EXPORT_SYMBOL(on_each_cpu_cond);
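
/*
 * Illustrative sketch (hypothetical names): IPI only the CPUs that
 * actually have work, as judged by a predicate that runs with
 * preemption disabled.
 */
static bool example_has_work(int cpu, void *info)
{
        return cpumask_test_cpu(cpu, (const struct cpumask *)info);
}

static void example_do_work(void *info)
{
        /* fast, non-blocking per-CPU work */
}

static void __maybe_unused example_run_where_needed(struct cpumask *pending)
{
        on_each_cpu_cond(example_has_work, example_do_work, pending,
                         true, GFP_KERNEL);
}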
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to wake up all CPUs that are in an idle
 * state, including those polling in idle; CPUs that are not idle are
 * left alone.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                if (s2idle_state == S2IDLE_STATE_ENTER ||
                    !cpu_isolated(cpu))
                        wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct      work;
        struct completion       done;
        int                     (*func)(void *);
        void                    *data;
        int                     ret;
        int                     cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu  = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
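
/*
 * Illustrative sketch (hypothetical names): unlike the IPI-based helpers
 * above, smp_call_on_cpu() runs the callback from a workqueue on the
 * target CPU, so it may sleep and can return a value directly.
 */
static int example_read_sensor(void *data)
{
        /* process context: sleeping calls such as msleep() are fine here */
        return 0;
}

static int __maybe_unused example_query_sensor(unsigned int cpu)
{
        /* phys=false: no vCPU pinning; just run on logical CPU @cpu. */
        return smp_call_on_cpu(cpu, example_read_sensor, NULL, false);
}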