Merge 4.9.114 into android-4.9
Changes in 4.9.114
    MIPS: Use async IPIs for arch_trigger_cpumask_backtrace()
    compiler, clang: suppress warning for unused static inline functions
    compiler, clang: properly override 'inline' for clang
    compiler, clang: always inline when CONFIG_OPTIMIZE_INLINING is disabled
    compiler-gcc.h: Add __attribute__((gnu_inline)) to all inline declarations
    x86/asm: Add _ASM_ARG* constants for argument registers to <asm/asm.h>
    x86/paravirt: Make native_save_fl() extern inline
    ocfs2: subsystem.su_mutex is required while accessing the item->ci_parent
    ocfs2: ip_alloc_sem should be taken in ocfs2_get_block()
    mtd: m25p80: consider max message size in m25p80_read
    bcm63xx_enet: correct clock usage
    bcm63xx_enet: do not write to random DMA channel on BCM6345
    crypto: crypto4xx - remove bad list_del
    crypto: crypto4xx - fix crypto4xx_build_pdr, crypto4xx_build_sdr leak
    atm: zatm: Fix potential Spectre v1
    ipvlan: fix IFLA_MTU ignored on NEWLINK
    net: dccp: avoid crash in ccid3_hc_rx_send_feedback()
    net: dccp: switch rx_tstamp_last_feedback to monotonic clock
    net/mlx5: Fix incorrect raw command length parsing
    net/mlx5: Fix wrong size allocation for QoS ETC TC regitster
    net_sched: blackhole: tell upper qdisc about dropped packets
    net: sungem: fix rx checksum support
    qed: Fix use of incorrect size in memcpy call.
    qed: Limit msix vectors in kdump kernel to the minimum required count.
    qmi_wwan: add support for the Dell Wireless 5821e module
    r8152: napi hangup fix after disconnect
    tcp: fix Fast Open key endianness
    tcp: prevent bogus FRTO undos with non-SACK flows
    vhost_net: validate sock before trying to put its fd
    net/packet: fix use-after-free
    net/mlx5: Fix command interface race in polling mode
    net: cxgb3_main: fix potential Spectre v1
    rtlwifi: rtl8821ae: fix firmware is not ready to run
    net: lan78xx: Fix race in tx pending skb size calculation
    netfilter: ebtables: reject non-bridge targets
    reiserfs: fix buffer overflow with long warning messages
    KEYS: DNS: fix parsing multiple options
    netfilter: ipv6: nf_defrag: drop skb dst before queueing
    rds: avoid unenecessary cong_update in loop transport
    net/nfc: Avoid stalls when nfc_alloc_send_skb() returned NULL.
    arm64: assembler: introduce ldr_this_cpu
    KVM: arm64: Store vcpu on the stack during __guest_enter()
    KVM: arm/arm64: Convert kvm_host_cpu_state to a static per-cpu allocation
    KVM: arm64: Change hyp_panic()s dependency on tpidr_el2
    arm64: alternatives: use tpidr_el2 on VHE hosts
    KVM: arm64: Stop save/restoring host tpidr_el1 on VHE
    arm64: alternatives: Add dynamic patching feature
    KVM: arm/arm64: Do not use kern_hyp_va() with kvm_vgic_global_state
    KVM: arm64: Avoid storing the vcpu pointer on the stack
    arm/arm64: smccc: Add SMCCC-specific return codes
    arm64: Call ARCH_WORKAROUND_2 on transitions between EL0 and EL1
    arm64: Add per-cpu infrastructure to call ARCH_WORKAROUND_2
    arm64: Add ARCH_WORKAROUND_2 probing
    arm64: Add 'ssbd' command-line option
    arm64: ssbd: Add global mitigation state accessor
    arm64: ssbd: Skip apply_ssbd if not using dynamic mitigation
    arm64: ssbd: Restore mitigation status on CPU resume
    arm64: ssbd: Introduce thread flag to control userspace mitigation
    arm64: ssbd: Add prctl interface for per-thread mitigation
    arm64: KVM: Add HYP per-cpu accessors
    arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
    arm64: KVM: Handle guest's ARCH_WORKAROUND_2 requests
    arm64: KVM: Add ARCH_WORKAROUND_2 discovery through ARCH_FEATURES_FUNC_ID
    string: drop __must_check from strscpy() and restore strscpy() usages in cgroup
    Linux 4.9.114

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
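As a rough illustration only (not part of this merge), the per-thread interface added by "arm64: ssbd: Add prctl interface for per-thread mitigation" would be driven from userspace roughly as below; this sketch assumes headers recent enough to define the PR_SPEC_* speculation-control constants and follows the behaviour of arch_prctl_spec_ctrl_set()/ssbd_prctl_set() introduced in arch/arm64/kernel/ssbd.c further down.

/* Minimal sketch: query and opt in to the SSB mitigation for this task. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>	/* PR_SPEC_* constants (assumed available) */

int main(void)
{
	/* Read the current Speculative Store Bypass control for this task. */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
	if (state < 0) {
		fprintf(stderr, "PR_GET_SPECULATION_CTRL: %s\n", strerror(errno));
		return 1;
	}
	printf("current SSB control: %#x\n", state);

	/*
	 * "Disable speculation" turns the mitigation *on* for this task
	 * (sets TIF_SSBD when booted with ssbd=kernel). The kernel rejects
	 * this with EPERM if the mitigation was force-disabled at boot.
	 */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0) < 0) {
		fprintf(stderr, "PR_SET_SPECULATION_CTRL: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}

Note the inverted sense spelled out in ssbd.c's own comment: the arm64 internal API enables the mitigation when the userspace API disables speculation, so PR_SPEC_DISABLE is the "mitigated" state and fails with EPERM under ssbd=force-off.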
@@ -4035,6 +4035,23 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
spia_pedr=
spia_peddr=

ssbd= [ARM64,HW]
Speculative Store Bypass Disable control

On CPUs that are vulnerable to the Speculative
Store Bypass vulnerability and offer a
firmware based mitigation, this parameter
indicates how the mitigation should be used:

force-on: Unconditionally enable mitigation for
for both kernel and userspace
force-off: Unconditionally disable mitigation for
for both kernel and userspace
kernel: Always enable mitigation in the
kernel, and offer a prctl interface
to allow userspace to register its
interest in being mitigated too.

stack_guard_gap= [MM]
override the default stack gap protection. The value
is in page units and it defines how many pages prior

Makefile (2 lines changed)
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 113
SUBLEVEL = 114
EXTRAVERSION =
NAME = Roaring Lionus

@@ -327,4 +327,16 @@ static inline bool kvm_arm_harden_branch_predictor(void)
return false;
}

#define KVM_SSBD_UNKNOWN -1
#define KVM_SSBD_FORCE_DISABLE 0
#define KVM_SSBD_KERNEL 1
#define KVM_SSBD_FORCE_ENABLE 2
#define KVM_SSBD_MITIGATED 3

static inline int kvm_arm_have_ssbd(void)
{
/* No way to detect it yet, pretend it is not there. */
return KVM_SSBD_UNKNOWN;
}

#endif /* __ARM_KVM_HOST_H__ */

@@ -28,6 +28,13 @@
*/
#define kern_hyp_va(kva) (kva)

/* Contrary to arm64, there is no need to generate a PC-relative address */
#define hyp_symbol_addr(s) \
({ \
typeof(s) *addr = &(s); \
addr; \
})

/*
* KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
*/
@@ -249,6 +256,11 @@ static inline int kvm_map_vectors(void)
return 0;
}

static inline int hyp_map_aux_data(void)
{
return 0;
}

#endif /* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */

@@ -51,8 +51,8 @@
__asm__(".arch_extension virt");
#endif

DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
static unsigned long hyp_default_vectors;

/* Per-CPU variable containing the currently running vcpu. */
@@ -338,7 +338,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}

vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);

kvm_arm_set_running_vcpu(vcpu);
}
@@ -1199,19 +1199,8 @@ static inline void hyp_cpu_pm_exit(void)
}
#endif

static void teardown_common_resources(void)
{
free_percpu(kvm_host_cpu_state);
}

static int init_common_resources(void)
{
kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
if (!kvm_host_cpu_state) {
kvm_err("Cannot allocate host CPU state\n");
return -ENOMEM;
}

/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1369,7 +1358,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
kvm_cpu_context_t *cpu_ctxt;

cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);

if (err) {
@@ -1378,6 +1367,12 @@ static int init_hyp_mode(void)
}
}

err = hyp_map_aux_data();
if (err) {
kvm_err("Cannot map host auxilary data: %d\n", err);
goto out_err;
}

kvm_info("Hyp mode initialized successfully\n");

return 0;
@@ -1447,7 +1442,6 @@ int kvm_arch_init(void *opaque)
out_hyp:
teardown_hyp_mode();
out_err:
teardown_common_resources();
return err;
}

@@ -403,7 +403,7 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu)
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
u32 func_id = smccc_get_function(vcpu);
u32 val = PSCI_RET_NOT_SUPPORTED;
u32 val = SMCCC_RET_NOT_SUPPORTED;
u32 feature;

switch (func_id) {
@@ -415,7 +415,21 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
switch(feature) {
case ARM_SMCCC_ARCH_WORKAROUND_1:
if (kvm_arm_harden_branch_predictor())
val = 0;
val = SMCCC_RET_SUCCESS;
break;
case ARM_SMCCC_ARCH_WORKAROUND_2:
switch (kvm_arm_have_ssbd()) {
case KVM_SSBD_FORCE_DISABLE:
case KVM_SSBD_UNKNOWN:
break;
case KVM_SSBD_KERNEL:
val = SMCCC_RET_SUCCESS;
break;
case KVM_SSBD_FORCE_ENABLE:
case KVM_SSBD_MITIGATED:
val = SMCCC_RET_NOT_REQUIRED;
break;
}
break;
}
break;

@@ -778,6 +778,15 @@ config HARDEN_BRANCH_PREDICTOR

If unsure, say Y.

config ARM64_SSBD
bool "Speculative Store Bypass Disable" if EXPERT
default y
help
This enables mitigation of the bypassing of previous stores
by speculative loads.

If unsure, say Y.

menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT

@@ -4,6 +4,8 @@
|
||||
#include <asm/cpucaps.h>
|
||||
#include <asm/insn.h>
|
||||
|
||||
#define ARM64_CB_PATCH ARM64_NCAPS
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/init.h>
|
||||
@@ -11,6 +13,8 @@
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/stringify.h>
|
||||
|
||||
extern int alternatives_applied;
|
||||
|
||||
struct alt_instr {
|
||||
s32 orig_offset; /* offset to original instruction */
|
||||
s32 alt_offset; /* offset to replacement instruction */
|
||||
@@ -19,12 +23,19 @@ struct alt_instr {
|
||||
u8 alt_len; /* size of new instruction(s), <= orig_len */
|
||||
};
|
||||
|
||||
typedef void (*alternative_cb_t)(struct alt_instr *alt,
|
||||
__le32 *origptr, __le32 *updptr, int nr_inst);
|
||||
|
||||
void __init apply_alternatives_all(void);
|
||||
void apply_alternatives(void *start, size_t length);
|
||||
|
||||
#define ALTINSTR_ENTRY(feature) \
|
||||
#define ALTINSTR_ENTRY(feature,cb) \
|
||||
" .word 661b - .\n" /* label */ \
|
||||
" .if " __stringify(cb) " == 0\n" \
|
||||
" .word 663f - .\n" /* new instruction */ \
|
||||
" .else\n" \
|
||||
" .word " __stringify(cb) "- .\n" /* callback */ \
|
||||
" .endif\n" \
|
||||
" .hword " __stringify(feature) "\n" /* feature bit */ \
|
||||
" .byte 662b-661b\n" /* source len */ \
|
||||
" .byte 664f-663f\n" /* replacement len */
|
||||
@@ -42,15 +53,18 @@ void apply_alternatives(void *start, size_t length);
|
||||
* but most assemblers die if insn1 or insn2 have a .inst. This should
|
||||
* be fixed in a binutils release posterior to 2.25.51.0.2 (anything
|
||||
* containing commit 4e4d08cf7399b606 or c1baaddf8861).
|
||||
*
|
||||
* Alternatives with callbacks do not generate replacement instructions.
|
||||
*/
|
||||
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
|
||||
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
|
||||
".if "__stringify(cfg_enabled)" == 1\n" \
|
||||
"661:\n\t" \
|
||||
oldinstr "\n" \
|
||||
"662:\n" \
|
||||
".pushsection .altinstructions,\"a\"\n" \
|
||||
ALTINSTR_ENTRY(feature) \
|
||||
ALTINSTR_ENTRY(feature,cb) \
|
||||
".popsection\n" \
|
||||
" .if " __stringify(cb) " == 0\n" \
|
||||
".pushsection .altinstr_replacement, \"a\"\n" \
|
||||
"663:\n\t" \
|
||||
newinstr "\n" \
|
||||
@@ -58,11 +72,17 @@ void apply_alternatives(void *start, size_t length);
|
||||
".popsection\n\t" \
|
||||
".org . - (664b-663b) + (662b-661b)\n\t" \
|
||||
".org . - (662b-661b) + (664b-663b)\n" \
|
||||
".else\n\t" \
|
||||
"663:\n\t" \
|
||||
"664:\n\t" \
|
||||
".endif\n" \
|
||||
".endif\n"
|
||||
|
||||
#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
|
||||
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
|
||||
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
|
||||
|
||||
#define ALTERNATIVE_CB(oldinstr, cb) \
|
||||
__ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
|
||||
#else
|
||||
|
||||
#include <asm/assembler.h>
|
||||
@@ -129,6 +149,14 @@ void apply_alternatives(void *start, size_t length);
|
||||
661:
|
||||
.endm
|
||||
|
||||
.macro alternative_cb cb
|
||||
.set .Lasm_alt_mode, 0
|
||||
.pushsection .altinstructions, "a"
|
||||
altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
|
||||
.popsection
|
||||
661:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Provide the other half of the alternative code sequence.
|
||||
*/
|
||||
@@ -154,6 +182,13 @@ void apply_alternatives(void *start, size_t length);
|
||||
.org . - (662b-661b) + (664b-663b)
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Callback-based alternative epilogue
|
||||
*/
|
||||
.macro alternative_cb_end
|
||||
662:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Provides a trivial alternative or default sequence consisting solely
|
||||
* of NOPs. The number of NOPs is chosen automatically to match the
|
||||
|
||||
@@ -269,6 +269,36 @@ lr .req x30 // link register
|
||||
ldr \dst, [\dst, \tmp]
|
||||
.endm
|
||||
|
||||
/*
|
||||
* @dst: Result of per_cpu(sym, smp_processor_id())
|
||||
* @sym: The name of the per-cpu variable
|
||||
* @tmp: scratch register
|
||||
*/
|
||||
.macro adr_this_cpu, dst, sym, tmp
|
||||
adr_l \dst, \sym
|
||||
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
|
||||
mrs \tmp, tpidr_el1
|
||||
alternative_else
|
||||
mrs \tmp, tpidr_el2
|
||||
alternative_endif
|
||||
add \dst, \dst, \tmp
|
||||
.endm
|
||||
|
||||
/*
|
||||
* @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
|
||||
* @sym: The name of the per-cpu variable
|
||||
* @tmp: scratch register
|
||||
*/
|
||||
.macro ldr_this_cpu dst, sym, tmp
|
||||
adr_l \dst, \sym
|
||||
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
|
||||
mrs \tmp, tpidr_el1
|
||||
alternative_else
|
||||
mrs \tmp, tpidr_el2
|
||||
alternative_endif
|
||||
ldr \dst, [\dst, \tmp]
|
||||
.endm
|
||||
|
||||
/*
|
||||
* vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
|
||||
*/
|
||||
|
||||
@@ -34,10 +34,10 @@
|
||||
#define ARM64_HAS_32BIT_EL0 13
|
||||
#define ARM64_HYP_OFFSET_LOW 14
|
||||
#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
|
||||
#define ARM64_UNMAP_KERNEL_AT_EL0 16
|
||||
#define ARM64_HARDEN_BRANCH_PREDICTOR 17
|
||||
#define ARM64_SSBD 18
|
||||
|
||||
#define ARM64_UNMAP_KERNEL_AT_EL0 23
|
||||
#define ARM64_HARDEN_BRANCH_PREDICTOR 24
|
||||
|
||||
#define ARM64_NCAPS 25
|
||||
#define ARM64_NCAPS 19
|
||||
|
||||
#endif /* __ASM_CPUCAPS_H */
|
||||
|
||||
@@ -227,6 +227,28 @@ static inline bool system_uses_ttbr0_pan(void)
|
||||
!cpus_have_cap(ARM64_HAS_PAN);
|
||||
}
|
||||
|
||||
#define ARM64_SSBD_UNKNOWN -1
|
||||
#define ARM64_SSBD_FORCE_DISABLE 0
|
||||
#define ARM64_SSBD_KERNEL 1
|
||||
#define ARM64_SSBD_FORCE_ENABLE 2
|
||||
#define ARM64_SSBD_MITIGATED 3
|
||||
|
||||
static inline int arm64_get_ssbd_state(void)
|
||||
{
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
extern int ssbd_state;
|
||||
return ssbd_state;
|
||||
#else
|
||||
return ARM64_SSBD_UNKNOWN;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
void arm64_set_ssbd_mitigation(bool state);
|
||||
#else
|
||||
static inline void arm64_set_ssbd_mitigation(bool state) {}
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif
|
||||
|
||||
@@ -33,6 +33,10 @@
|
||||
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
|
||||
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
|
||||
|
||||
#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
|
||||
#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
|
||||
|
||||
/* Translate a kernel address of @sym into its equivalent linear mapping */
|
||||
#define kvm_ksym_ref(sym) \
|
||||
({ \
|
||||
void *val = &sym; \
|
||||
@@ -65,6 +69,43 @@ extern u32 __kvm_get_mdcr_el2(void);
|
||||
|
||||
extern u32 __init_stage2_translation(void);
|
||||
|
||||
/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
|
||||
#define __hyp_this_cpu_ptr(sym) \
|
||||
({ \
|
||||
void *__ptr = hyp_symbol_addr(sym); \
|
||||
__ptr += read_sysreg(tpidr_el2); \
|
||||
(typeof(&sym))__ptr; \
|
||||
})
|
||||
|
||||
#define __hyp_this_cpu_read(sym) \
|
||||
({ \
|
||||
*__hyp_this_cpu_ptr(sym); \
|
||||
})
|
||||
|
||||
#else /* __ASSEMBLY__ */
|
||||
|
||||
.macro hyp_adr_this_cpu reg, sym, tmp
|
||||
adr_l \reg, \sym
|
||||
mrs \tmp, tpidr_el2
|
||||
add \reg, \reg, \tmp
|
||||
.endm
|
||||
|
||||
.macro hyp_ldr_this_cpu reg, sym, tmp
|
||||
adr_l \reg, \sym
|
||||
mrs \tmp, tpidr_el2
|
||||
ldr \reg, [\reg, \tmp]
|
||||
.endm
|
||||
|
||||
.macro get_host_ctxt reg, tmp
|
||||
hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
|
||||
.endm
|
||||
|
||||
.macro get_vcpu_ptr vcpu, ctxt
|
||||
get_host_ctxt \ctxt, \vcpu
|
||||
ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
|
||||
kern_hyp_va \vcpu
|
||||
.endm
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __ARM_KVM_ASM_H__ */
|
||||
|
||||
@@ -197,6 +197,8 @@ struct kvm_cpu_context {
|
||||
u64 sys_regs[NR_SYS_REGS];
|
||||
u32 copro[NR_COPRO_REGS];
|
||||
};
|
||||
|
||||
struct kvm_vcpu *__hyp_running_vcpu;
|
||||
};
|
||||
|
||||
typedef struct kvm_cpu_context kvm_cpu_context_t;
|
||||
@@ -211,6 +213,9 @@ struct kvm_vcpu_arch {
|
||||
/* Exception Information */
|
||||
struct kvm_vcpu_fault_info fault;
|
||||
|
||||
/* State of various workarounds, see kvm_asm.h for bit assignment */
|
||||
u64 workaround_flags;
|
||||
|
||||
/* Guest debug state */
|
||||
u64 debug_flags;
|
||||
|
||||
@@ -354,10 +359,15 @@ int kvm_perf_teardown(void);
|
||||
|
||||
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
|
||||
|
||||
void __kvm_set_tpidr_el2(u64 tpidr_el2);
|
||||
DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
|
||||
|
||||
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
|
||||
unsigned long hyp_stack_ptr,
|
||||
unsigned long vector_ptr)
|
||||
{
|
||||
u64 tpidr_el2;
|
||||
|
||||
/*
|
||||
* Call initialization code, and switch to the full blown HYP code.
|
||||
* If the cpucaps haven't been finalized yet, something has gone very
|
||||
@@ -366,6 +376,16 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
|
||||
*/
|
||||
BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
|
||||
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
|
||||
|
||||
/*
|
||||
* Calculate the raw per-cpu offset without a translation from the
|
||||
* kernel's mapping to the linear mapping, and store it in tpidr_el2
|
||||
* so that we can use adr_l to access per-cpu variables in EL2.
|
||||
*/
|
||||
tpidr_el2 = (u64)this_cpu_ptr(&kvm_host_cpu_state)
|
||||
- (u64)kvm_ksym_ref(kvm_host_cpu_state);
|
||||
|
||||
kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
|
||||
}
|
||||
|
||||
void __kvm_hyp_teardown(void);
|
||||
@@ -405,4 +425,27 @@ static inline bool kvm_arm_harden_branch_predictor(void)
|
||||
return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
|
||||
}
|
||||
|
||||
#define KVM_SSBD_UNKNOWN -1
|
||||
#define KVM_SSBD_FORCE_DISABLE 0
|
||||
#define KVM_SSBD_KERNEL 1
|
||||
#define KVM_SSBD_FORCE_ENABLE 2
|
||||
#define KVM_SSBD_MITIGATED 3
|
||||
|
||||
static inline int kvm_arm_have_ssbd(void)
|
||||
{
|
||||
switch (arm64_get_ssbd_state()) {
|
||||
case ARM64_SSBD_FORCE_DISABLE:
|
||||
return KVM_SSBD_FORCE_DISABLE;
|
||||
case ARM64_SSBD_KERNEL:
|
||||
return KVM_SSBD_KERNEL;
|
||||
case ARM64_SSBD_FORCE_ENABLE:
|
||||
return KVM_SSBD_FORCE_ENABLE;
|
||||
case ARM64_SSBD_MITIGATED:
|
||||
return KVM_SSBD_MITIGATED;
|
||||
case ARM64_SSBD_UNKNOWN:
|
||||
default:
|
||||
return KVM_SSBD_UNKNOWN;
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* __ARM64_KVM_HOST_H__ */
|
||||
|
||||
@@ -130,6 +130,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
|
||||
|
||||
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
|
||||
|
||||
/*
|
||||
* Obtain the PC-relative address of a kernel symbol
|
||||
* s: symbol
|
||||
*
|
||||
* The goal of this macro is to return a symbol's address based on a
|
||||
* PC-relative computation, as opposed to a loading the VA from a
|
||||
* constant pool or something similar. This works well for HYP, as an
|
||||
* absolute VA is guaranteed to be wrong. Only use this if trying to
|
||||
* obtain the address of a symbol (i.e. not something you obtained by
|
||||
* following a pointer).
|
||||
*/
|
||||
#define hyp_symbol_addr(s) \
|
||||
({ \
|
||||
typeof(s) *addr; \
|
||||
asm("adrp %0, %1\n" \
|
||||
"add %0, %0, :lo12:%1\n" \
|
||||
: "=r" (addr) : "S" (&s)); \
|
||||
addr; \
|
||||
})
|
||||
|
||||
/*
|
||||
* We currently only support a 40bit IPA.
|
||||
*/
|
||||
@@ -367,5 +387,29 @@ static inline int kvm_map_vectors(void)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
|
||||
|
||||
static inline int hyp_map_aux_data(void)
|
||||
{
|
||||
int cpu, err;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
u64 *ptr;
|
||||
|
||||
ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
|
||||
err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static inline int hyp_map_aux_data(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __ARM64_KVM_MMU_H__ */
|
||||
|
||||
@@ -17,10 +17,14 @@
|
||||
#define __ASM_PERCPU_H
|
||||
|
||||
#include <asm/stack_pointer.h>
|
||||
#include <asm/alternative.h>
|
||||
|
||||
static inline void set_my_cpu_offset(unsigned long off)
|
||||
{
|
||||
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
|
||||
asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
|
||||
"msr tpidr_el2, %0",
|
||||
ARM64_HAS_VIRT_HOST_EXTN)
|
||||
:: "r" (off) : "memory");
|
||||
}
|
||||
|
||||
static inline unsigned long __my_cpu_offset(void)
|
||||
@@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void)
|
||||
* We want to allow caching the value, so avoid using volatile and
|
||||
* instead use a fake stack read to hazard against barrier().
|
||||
*/
|
||||
asm("mrs %0, tpidr_el1" : "=r" (off) :
|
||||
asm(ALTERNATIVE("mrs %0, tpidr_el1",
|
||||
"mrs %0, tpidr_el2",
|
||||
ARM64_HAS_VIRT_HOST_EXTN)
|
||||
: "=r" (off) :
|
||||
"Q" (*(const unsigned long *)current_stack_pointer));
|
||||
|
||||
return off;
|
||||
|
||||
@@ -95,6 +95,7 @@ struct thread_info {
|
||||
#define TIF_RESTORE_SIGMASK 20
|
||||
#define TIF_SINGLESTEP 21
|
||||
#define TIF_32BIT 22 /* 32bit process */
|
||||
#define TIF_SSBD 23 /* Wants SSB mitigation */
|
||||
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
|
||||
|
||||
@@ -50,6 +50,7 @@ arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
|
||||
arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
|
||||
arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
|
||||
cpu-reset.o
|
||||
arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
|
||||
|
||||
ifeq ($(CONFIG_KVM),y)
|
||||
arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
|
||||
|
||||
@@ -28,10 +28,12 @@
|
||||
#include <asm/sections.h>
|
||||
#include <linux/stop_machine.h>
|
||||
|
||||
#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
|
||||
#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
|
||||
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
|
||||
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
|
||||
|
||||
int alternatives_applied;
|
||||
|
||||
struct alt_region {
|
||||
struct alt_instr *begin;
|
||||
struct alt_instr *end;
|
||||
@@ -105,31 +107,52 @@ static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
|
||||
return insn;
|
||||
}
|
||||
|
||||
static void patch_alternative(struct alt_instr *alt,
|
||||
__le32 *origptr, __le32 *updptr, int nr_inst)
|
||||
{
|
||||
__le32 *replptr;
|
||||
int i;
|
||||
|
||||
replptr = ALT_REPL_PTR(alt);
|
||||
for (i = 0; i < nr_inst; i++) {
|
||||
u32 insn;
|
||||
|
||||
insn = get_alt_insn(alt, origptr + i, replptr + i);
|
||||
updptr[i] = cpu_to_le32(insn);
|
||||
}
|
||||
}
|
||||
|
||||
static void __apply_alternatives(void *alt_region)
|
||||
{
|
||||
struct alt_instr *alt;
|
||||
struct alt_region *region = alt_region;
|
||||
u32 *origptr, *replptr;
|
||||
__le32 *origptr;
|
||||
alternative_cb_t alt_cb;
|
||||
|
||||
for (alt = region->begin; alt < region->end; alt++) {
|
||||
u32 insn;
|
||||
int i, nr_inst;
|
||||
int nr_inst;
|
||||
|
||||
if (!cpus_have_cap(alt->cpufeature))
|
||||
/* Use ARM64_CB_PATCH as an unconditional patch */
|
||||
if (alt->cpufeature < ARM64_CB_PATCH &&
|
||||
!cpus_have_cap(alt->cpufeature))
|
||||
continue;
|
||||
|
||||
BUG_ON(alt->alt_len != alt->orig_len);
|
||||
if (alt->cpufeature == ARM64_CB_PATCH)
|
||||
BUG_ON(alt->alt_len != 0);
|
||||
else
|
||||
BUG_ON(alt->alt_len != alt->orig_len);
|
||||
|
||||
pr_info_once("patching kernel code\n");
|
||||
|
||||
origptr = ALT_ORIG_PTR(alt);
|
||||
replptr = ALT_REPL_PTR(alt);
|
||||
nr_inst = alt->alt_len / sizeof(insn);
|
||||
nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
|
||||
|
||||
for (i = 0; i < nr_inst; i++) {
|
||||
insn = get_alt_insn(alt, origptr + i, replptr + i);
|
||||
*(origptr + i) = cpu_to_le32(insn);
|
||||
}
|
||||
if (alt->cpufeature < ARM64_CB_PATCH)
|
||||
alt_cb = patch_alternative;
|
||||
else
|
||||
alt_cb = ALT_REPL_PTR(alt);
|
||||
|
||||
alt_cb(alt, origptr, origptr, nr_inst);
|
||||
|
||||
flush_icache_range((uintptr_t)origptr,
|
||||
(uintptr_t)(origptr + nr_inst));
|
||||
@@ -142,7 +165,6 @@ static void __apply_alternatives(void *alt_region)
|
||||
*/
|
||||
static int __apply_alternatives_multi_stop(void *unused)
|
||||
{
|
||||
static int patched = 0;
|
||||
struct alt_region region = {
|
||||
.begin = (struct alt_instr *)__alt_instructions,
|
||||
.end = (struct alt_instr *)__alt_instructions_end,
|
||||
@@ -150,14 +172,14 @@ static int __apply_alternatives_multi_stop(void *unused)
|
||||
|
||||
/* We always have a CPU 0 at this point (__init) */
|
||||
if (smp_processor_id()) {
|
||||
while (!READ_ONCE(patched))
|
||||
while (!READ_ONCE(alternatives_applied))
|
||||
cpu_relax();
|
||||
isb();
|
||||
} else {
|
||||
BUG_ON(patched);
|
||||
BUG_ON(alternatives_applied);
|
||||
__apply_alternatives(&region);
|
||||
/* Barriers provided by the cache flushing */
|
||||
WRITE_ONCE(patched, 1);
|
||||
WRITE_ONCE(alternatives_applied, 1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -130,11 +130,13 @@ int main(void)
|
||||
BLANK();
|
||||
#ifdef CONFIG_KVM_ARM_HOST
|
||||
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
|
||||
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
|
||||
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
|
||||
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
|
||||
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
|
||||
DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
|
||||
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
|
||||
DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
|
||||
#endif
|
||||
#ifdef CONFIG_CPU_PM
|
||||
DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
|
||||
|
||||
@@ -187,6 +187,178 @@ static int enable_smccc_arch_workaround_1(void *data)
|
||||
}
|
||||
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
|
||||
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
|
||||
|
||||
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
|
||||
|
||||
static const struct ssbd_options {
|
||||
const char *str;
|
||||
int state;
|
||||
} ssbd_options[] = {
|
||||
{ "force-on", ARM64_SSBD_FORCE_ENABLE, },
|
||||
{ "force-off", ARM64_SSBD_FORCE_DISABLE, },
|
||||
{ "kernel", ARM64_SSBD_KERNEL, },
|
||||
};
|
||||
|
||||
static int __init ssbd_cfg(char *buf)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!buf || !buf[0])
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
|
||||
int len = strlen(ssbd_options[i].str);
|
||||
|
||||
if (strncmp(buf, ssbd_options[i].str, len))
|
||||
continue;
|
||||
|
||||
ssbd_state = ssbd_options[i].state;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
early_param("ssbd", ssbd_cfg);
|
||||
|
||||
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
|
||||
__le32 *origptr, __le32 *updptr,
|
||||
int nr_inst)
|
||||
{
|
||||
u32 insn;
|
||||
|
||||
BUG_ON(nr_inst != 1);
|
||||
|
||||
switch (psci_ops.conduit) {
|
||||
case PSCI_CONDUIT_HVC:
|
||||
insn = aarch64_insn_get_hvc_value();
|
||||
break;
|
||||
case PSCI_CONDUIT_SMC:
|
||||
insn = aarch64_insn_get_smc_value();
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
|
||||
*updptr = cpu_to_le32(insn);
|
||||
}
|
||||
|
||||
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
|
||||
__le32 *origptr, __le32 *updptr,
|
||||
int nr_inst)
|
||||
{
|
||||
BUG_ON(nr_inst != 1);
|
||||
/*
|
||||
* Only allow mitigation on EL1 entry/exit and guest
|
||||
* ARCH_WORKAROUND_2 handling if the SSBD state allows it to
|
||||
* be flipped.
|
||||
*/
|
||||
if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
|
||||
*updptr = cpu_to_le32(aarch64_insn_gen_nop());
|
||||
}
|
||||
|
||||
void arm64_set_ssbd_mitigation(bool state)
|
||||
{
|
||||
switch (psci_ops.conduit) {
|
||||
case PSCI_CONDUIT_HVC:
|
||||
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
|
||||
break;
|
||||
|
||||
case PSCI_CONDUIT_SMC:
|
||||
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
|
||||
break;
|
||||
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
|
||||
int scope)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
bool required = true;
|
||||
s32 val;
|
||||
|
||||
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
|
||||
|
||||
if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
|
||||
ssbd_state = ARM64_SSBD_UNKNOWN;
|
||||
return false;
|
||||
}
|
||||
|
||||
switch (psci_ops.conduit) {
|
||||
case PSCI_CONDUIT_HVC:
|
||||
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_2, &res);
|
||||
break;
|
||||
|
||||
case PSCI_CONDUIT_SMC:
|
||||
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_2, &res);
|
||||
break;
|
||||
|
||||
default:
|
||||
ssbd_state = ARM64_SSBD_UNKNOWN;
|
||||
return false;
|
||||
}
|
||||
|
||||
val = (s32)res.a0;
|
||||
|
||||
switch (val) {
|
||||
case SMCCC_RET_NOT_SUPPORTED:
|
||||
ssbd_state = ARM64_SSBD_UNKNOWN;
|
||||
return false;
|
||||
|
||||
case SMCCC_RET_NOT_REQUIRED:
|
||||
pr_info_once("%s mitigation not required\n", entry->desc);
|
||||
ssbd_state = ARM64_SSBD_MITIGATED;
|
||||
return false;
|
||||
|
||||
case SMCCC_RET_SUCCESS:
|
||||
required = true;
|
||||
break;
|
||||
|
||||
case 1: /* Mitigation not required on this CPU */
|
||||
required = false;
|
||||
break;
|
||||
|
||||
default:
|
||||
WARN_ON(1);
|
||||
return false;
|
||||
}
|
||||
|
||||
switch (ssbd_state) {
|
||||
case ARM64_SSBD_FORCE_DISABLE:
|
||||
pr_info_once("%s disabled from command-line\n", entry->desc);
|
||||
arm64_set_ssbd_mitigation(false);
|
||||
required = false;
|
||||
break;
|
||||
|
||||
case ARM64_SSBD_KERNEL:
|
||||
if (required) {
|
||||
__this_cpu_write(arm64_ssbd_callback_required, 1);
|
||||
arm64_set_ssbd_mitigation(true);
|
||||
}
|
||||
break;
|
||||
|
||||
case ARM64_SSBD_FORCE_ENABLE:
|
||||
pr_info_once("%s forced from command-line\n", entry->desc);
|
||||
arm64_set_ssbd_mitigation(true);
|
||||
required = true;
|
||||
break;
|
||||
|
||||
default:
|
||||
WARN_ON(1);
|
||||
break;
|
||||
}
|
||||
|
||||
return required;
|
||||
}
|
||||
#endif /* CONFIG_ARM64_SSBD */
|
||||
|
||||
#define MIDR_RANGE(model, min, max) \
|
||||
.def_scope = SCOPE_LOCAL_CPU, \
|
||||
.matches = is_affected_midr_range, \
|
||||
@@ -309,6 +481,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
|
||||
.enable = enable_smccc_arch_workaround_1,
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
{
|
||||
.desc = "Speculative Store Bypass Disable",
|
||||
.def_scope = SCOPE_LOCAL_CPU,
|
||||
.capability = ARM64_SSBD,
|
||||
.matches = has_ssbd_mitigation,
|
||||
},
|
||||
#endif
|
||||
{
|
||||
}
|
||||
|
||||
@@ -830,6 +830,22 @@ static int __init parse_kpti(char *str)
|
||||
early_param("kpti", parse_kpti);
|
||||
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
|
||||
|
||||
static int cpu_copy_el2regs(void *__unused)
|
||||
{
|
||||
/*
|
||||
* Copy register values that aren't redirected by hardware.
|
||||
*
|
||||
* Before code patching, we only set tpidr_el1, all CPUs need to copy
|
||||
* this value to tpidr_el2 before we patch the code. Once we've done
|
||||
* that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
|
||||
* do anything here.
|
||||
*/
|
||||
if (!alternatives_applied)
|
||||
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
{
|
||||
.desc = "GIC system register CPU interface",
|
||||
@@ -896,6 +912,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
.capability = ARM64_HAS_VIRT_HOST_EXTN,
|
||||
.def_scope = SCOPE_SYSTEM,
|
||||
.matches = runs_at_el2,
|
||||
.enable = cpu_copy_el2regs,
|
||||
},
|
||||
{
|
||||
.desc = "32-bit EL0 Support",
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/linkage.h>
|
||||
|
||||
@@ -97,6 +98,25 @@ alternative_else_nop_endif
|
||||
add \dst, \dst, #(\sym - .entry.tramp.text)
|
||||
.endm
|
||||
|
||||
// This macro corrupts x0-x3. It is the caller's duty
|
||||
// to save/restore them if required.
|
||||
.macro apply_ssbd, state, targ, tmp1, tmp2
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
alternative_cb arm64_enable_wa2_handling
|
||||
b \targ
|
||||
alternative_cb_end
|
||||
ldr_this_cpu \tmp2, arm64_ssbd_callback_required, \tmp1
|
||||
cbz \tmp2, \targ
|
||||
ldr \tmp2, [tsk, #TI_FLAGS]
|
||||
tbnz \tmp2, #TIF_SSBD, \targ
|
||||
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
|
||||
mov w1, #\state
|
||||
alternative_cb arm64_update_smccc_conduit
|
||||
nop // Patched to SMC/HVC #0
|
||||
alternative_cb_end
|
||||
#endif
|
||||
.endm
|
||||
|
||||
.macro kernel_entry, el, regsize = 64
|
||||
.if \regsize == 32
|
||||
mov w0, w0 // zero upper 32 bits of x0
|
||||
@@ -123,6 +143,14 @@ alternative_else_nop_endif
|
||||
ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
|
||||
disable_step_tsk x19, x20 // exceptions when scheduling.
|
||||
|
||||
apply_ssbd 1, 1f, x22, x23
|
||||
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
ldp x0, x1, [sp, #16 * 0]
|
||||
ldp x2, x3, [sp, #16 * 1]
|
||||
#endif
|
||||
1:
|
||||
|
||||
mov x29, xzr // fp pointed to user-space
|
||||
.else
|
||||
add x21, sp, #S_FRAME_SIZE
|
||||
@@ -251,6 +279,8 @@ alternative_if ARM64_WORKAROUND_845719
|
||||
alternative_else_nop_endif
|
||||
#endif
|
||||
3:
|
||||
apply_ssbd 0, 5f, x0, x1
|
||||
5:
|
||||
.endif
|
||||
|
||||
msr elr_el1, x21 // set up the return data
|
||||
|
||||
@@ -305,6 +305,17 @@ int swsusp_arch_suspend(void)
|
||||
|
||||
sleep_cpu = -EINVAL;
|
||||
__cpu_suspend_exit();
|
||||
|
||||
/*
|
||||
* Just in case the boot kernel did turn the SSBD
|
||||
* mitigation off behind our back, let's set the state
|
||||
* to what we expect it to be.
|
||||
*/
|
||||
switch (arm64_get_ssbd_state()) {
|
||||
case ARM64_SSBD_FORCE_ENABLE:
|
||||
case ARM64_SSBD_KERNEL:
|
||||
arm64_set_ssbd_mitigation(true);
|
||||
}
|
||||
}
|
||||
|
||||
local_dbg_restore(flags);
|
||||
|
||||
arch/arm64/kernel/ssbd.c (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2018 ARM Ltd, All Rights Reserved.
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/prctl.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/thread_info.h>
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
|
||||
/*
|
||||
* prctl interface for SSBD
|
||||
*/
|
||||
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
|
||||
{
|
||||
int state = arm64_get_ssbd_state();
|
||||
|
||||
/* Unsupported */
|
||||
if (state == ARM64_SSBD_UNKNOWN)
|
||||
return -EINVAL;
|
||||
|
||||
/* Treat the unaffected/mitigated state separately */
|
||||
if (state == ARM64_SSBD_MITIGATED) {
|
||||
switch (ctrl) {
|
||||
case PR_SPEC_ENABLE:
|
||||
return -EPERM;
|
||||
case PR_SPEC_DISABLE:
|
||||
case PR_SPEC_FORCE_DISABLE:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Things are a bit backward here: the arm64 internal API
|
||||
* *enables the mitigation* when the userspace API *disables
|
||||
* speculation*. So much fun.
|
||||
*/
|
||||
switch (ctrl) {
|
||||
case PR_SPEC_ENABLE:
|
||||
/* If speculation is force disabled, enable is not allowed */
|
||||
if (state == ARM64_SSBD_FORCE_ENABLE ||
|
||||
task_spec_ssb_force_disable(task))
|
||||
return -EPERM;
|
||||
task_clear_spec_ssb_disable(task);
|
||||
clear_tsk_thread_flag(task, TIF_SSBD);
|
||||
break;
|
||||
case PR_SPEC_DISABLE:
|
||||
if (state == ARM64_SSBD_FORCE_DISABLE)
|
||||
return -EPERM;
|
||||
task_set_spec_ssb_disable(task);
|
||||
set_tsk_thread_flag(task, TIF_SSBD);
|
||||
break;
|
||||
case PR_SPEC_FORCE_DISABLE:
|
||||
if (state == ARM64_SSBD_FORCE_DISABLE)
|
||||
return -EPERM;
|
||||
task_set_spec_ssb_disable(task);
|
||||
task_set_spec_ssb_force_disable(task);
|
||||
set_tsk_thread_flag(task, TIF_SSBD);
|
||||
break;
|
||||
default:
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
|
||||
unsigned long ctrl)
|
||||
{
|
||||
switch (which) {
|
||||
case PR_SPEC_STORE_BYPASS:
|
||||
return ssbd_prctl_set(task, ctrl);
|
||||
default:
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
|
||||
static int ssbd_prctl_get(struct task_struct *task)
|
||||
{
|
||||
switch (arm64_get_ssbd_state()) {
|
||||
case ARM64_SSBD_UNKNOWN:
|
||||
return -EINVAL;
|
||||
case ARM64_SSBD_FORCE_ENABLE:
|
||||
return PR_SPEC_DISABLE;
|
||||
case ARM64_SSBD_KERNEL:
|
||||
if (task_spec_ssb_force_disable(task))
|
||||
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
|
||||
if (task_spec_ssb_disable(task))
|
||||
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
|
||||
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
|
||||
case ARM64_SSBD_FORCE_DISABLE:
|
||||
return PR_SPEC_ENABLE;
|
||||
default:
|
||||
return PR_SPEC_NOT_AFFECTED;
|
||||
}
|
||||
}
|
||||
|
||||
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
|
||||
{
|
||||
switch (which) {
|
||||
case PR_SPEC_STORE_BYPASS:
|
||||
return ssbd_prctl_get(task);
|
||||
default:
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
@@ -61,6 +61,14 @@ void notrace __cpu_suspend_exit(void)
|
||||
*/
|
||||
if (hw_breakpoint_restore)
|
||||
hw_breakpoint_restore(cpu);
|
||||
|
||||
/*
|
||||
* On resume, firmware implementing dynamic mitigation will
|
||||
* have turned the mitigation on. If the user has forcefully
|
||||
* disabled it, make sure their wishes are obeyed.
|
||||
*/
|
||||
if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
|
||||
arm64_set_ssbd_mitigation(false);
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -118,6 +118,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
|
||||
kern_hyp_va x2
|
||||
msr vbar_el2, x2
|
||||
|
||||
/* copy tpidr_el1 into tpidr_el2 for use by HYP */
|
||||
mrs x1, tpidr_el1
|
||||
msr tpidr_el2, x1
|
||||
|
||||
/* Hello, World! */
|
||||
eret
|
||||
ENDPROC(__kvm_hyp_init)
|
||||
|
||||
@@ -62,9 +62,6 @@ ENTRY(__guest_enter)
|
||||
// Store the host regs
|
||||
save_callee_saved_regs x1
|
||||
|
||||
// Store the host_ctxt for use at exit time
|
||||
str x1, [sp, #-16]!
|
||||
|
||||
add x18, x0, #VCPU_CONTEXT
|
||||
|
||||
// Restore guest regs x0-x17
|
||||
@@ -118,8 +115,7 @@ ENTRY(__guest_exit)
|
||||
// Store the guest regs x19-x29, lr
|
||||
save_callee_saved_regs x1
|
||||
|
||||
// Restore the host_ctxt from the stack
|
||||
ldr x2, [sp], #16
|
||||
get_host_ctxt x2, x3
|
||||
|
||||
// Now restore the host regs
|
||||
restore_callee_saved_regs x2
|
||||
@@ -159,6 +155,10 @@ abort_guest_exit_end:
|
||||
ENDPROC(__guest_exit)
|
||||
|
||||
ENTRY(__fpsimd_guest_restore)
|
||||
// x0: esr
|
||||
// x1: vcpu
|
||||
// x2-x29,lr: vcpu regs
|
||||
// vcpu x0-x1 on the stack
|
||||
stp x2, x3, [sp, #-16]!
|
||||
stp x4, lr, [sp, #-16]!
|
||||
|
||||
@@ -173,7 +173,7 @@ alternative_else
|
||||
alternative_endif
|
||||
isb
|
||||
|
||||
mrs x3, tpidr_el2
|
||||
mov x3, x1
|
||||
|
||||
ldr x0, [x3, #VCPU_HOST_CONTEXT]
|
||||
kern_hyp_va x0
|
||||
|
||||
@@ -72,13 +72,8 @@ ENDPROC(__kvm_hyp_teardown)
|
||||
el1_sync: // Guest trapped into EL2
|
||||
stp x0, x1, [sp, #-16]!
|
||||
|
||||
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
|
||||
mrs x1, esr_el2
|
||||
alternative_else
|
||||
mrs x1, esr_el1
|
||||
alternative_endif
|
||||
lsr x0, x1, #ESR_ELx_EC_SHIFT
|
||||
|
||||
mrs x0, esr_el2
|
||||
lsr x0, x0, #ESR_ELx_EC_SHIFT
|
||||
cmp x0, #ESR_ELx_EC_HVC64
|
||||
ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
|
||||
b.ne el1_trap
|
||||
@@ -112,33 +107,73 @@ el1_hvc_guest:
|
||||
*/
|
||||
ldr x1, [sp] // Guest's x0
|
||||
eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
|
||||
cbz w1, wa_epilogue
|
||||
|
||||
/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
|
||||
eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
|
||||
ARM_SMCCC_ARCH_WORKAROUND_2)
|
||||
cbnz w1, el1_trap
|
||||
mov x0, x1
|
||||
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
alternative_cb arm64_enable_wa2_handling
|
||||
b wa2_end
|
||||
alternative_cb_end
|
||||
get_vcpu_ptr x2, x0
|
||||
ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]
|
||||
|
||||
// Sanitize the argument and update the guest flags
|
||||
ldr x1, [sp, #8] // Guest's x1
|
||||
clz w1, w1 // Murphy's device:
|
||||
lsr w1, w1, #5 // w1 = !!w1 without using
|
||||
eor w1, w1, #1 // the flags...
|
||||
bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
|
||||
str x0, [x2, #VCPU_WORKAROUND_FLAGS]
|
||||
|
||||
/* Check that we actually need to perform the call */
|
||||
hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
|
||||
cbz x0, wa2_end
|
||||
|
||||
mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2
|
||||
smc #0
|
||||
|
||||
/* Don't leak data from the SMC call */
|
||||
mov x3, xzr
|
||||
wa2_end:
|
||||
mov x2, xzr
|
||||
mov x1, xzr
|
||||
#endif
|
||||
|
||||
wa_epilogue:
|
||||
mov x0, xzr
|
||||
add sp, sp, #16
|
||||
eret
|
||||
|
||||
el1_trap:
|
||||
get_vcpu_ptr x1, x0
|
||||
|
||||
mrs x0, esr_el2
|
||||
lsr x0, x0, #ESR_ELx_EC_SHIFT
|
||||
/*
|
||||
* x0: ESR_EC
|
||||
* x1: vcpu pointer
|
||||
*/
|
||||
|
||||
/* Guest accessed VFP/SIMD registers, save host, restore Guest */
|
||||
cmp x0, #ESR_ELx_EC_FP_ASIMD
|
||||
b.eq __fpsimd_guest_restore
|
||||
|
||||
mrs x1, tpidr_el2
|
||||
mov x0, #ARM_EXCEPTION_TRAP
|
||||
b __guest_exit
|
||||
|
||||
el1_irq:
|
||||
stp x0, x1, [sp, #-16]!
|
||||
mrs x1, tpidr_el2
|
||||
get_vcpu_ptr x1, x0
|
||||
mov x0, #ARM_EXCEPTION_IRQ
|
||||
b __guest_exit
|
||||
|
||||
el1_error:
|
||||
stp x0, x1, [sp, #-16]!
|
||||
mrs x1, tpidr_el2
|
||||
get_vcpu_ptr x1, x0
|
||||
mov x0, #ARM_EXCEPTION_EL1_SERROR
|
||||
b __guest_exit
|
||||
|
||||
@@ -173,6 +208,11 @@ ENTRY(__hyp_do_panic)
|
||||
eret
|
||||
ENDPROC(__hyp_do_panic)
|
||||
|
||||
ENTRY(__hyp_panic)
|
||||
get_host_ctxt x0, x1
|
||||
b hyp_panic
|
||||
ENDPROC(__hyp_panic)
|
||||
|
||||
.macro invalid_vector label, target = __hyp_panic
|
||||
.align 2
|
||||
\label:
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <uapi/linux/psci.h>
|
||||
@@ -267,6 +268,39 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
|
||||
write_sysreg_el2(*vcpu_pc(vcpu), elr);
|
||||
}
|
||||
|
||||
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!cpus_have_cap(ARM64_SSBD))
|
||||
return false;
|
||||
|
||||
return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
|
||||
}
|
||||
|
||||
static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
/*
|
||||
* The host runs with the workaround always present. If the
|
||||
* guest wants it disabled, so be it...
|
||||
*/
|
||||
if (__needs_ssbd_off(vcpu) &&
|
||||
__hyp_this_cpu_read(arm64_ssbd_callback_required))
|
||||
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
#ifdef CONFIG_ARM64_SSBD
|
||||
/*
|
||||
* If the guest has disabled the workaround, bring it back on.
|
||||
*/
|
||||
if (__needs_ssbd_off(vcpu) &&
|
||||
__hyp_this_cpu_read(arm64_ssbd_callback_required))
|
||||
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
|
||||
#endif
|
||||
}
|
||||
|
||||
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
@@ -275,9 +309,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
u64 exit_code;
|
||||
|
||||
vcpu = kern_hyp_va(vcpu);
|
||||
write_sysreg(vcpu, tpidr_el2);
|
||||
|
||||
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
|
||||
host_ctxt->__hyp_running_vcpu = vcpu;
|
||||
guest_ctxt = &vcpu->arch.ctxt;
|
||||
|
||||
__sysreg_save_host_state(host_ctxt);
|
||||
@@ -297,6 +331,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
__sysreg_restore_guest_state(guest_ctxt);
|
||||
__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
|
||||
|
||||
__set_guest_arch_workaround_state(vcpu);
|
||||
|
||||
/* Jump in the fire! */
|
||||
again:
|
||||
exit_code = __guest_enter(vcpu, host_ctxt);
|
||||
@@ -339,6 +375,8 @@ again:
|
||||
}
|
||||
}
|
||||
|
||||
__set_host_arch_workaround_state(vcpu);
|
||||
|
||||
fp_enabled = __fpsimd_enabled();
|
||||
|
||||
__sysreg_save_guest_state(guest_ctxt);
|
||||
@@ -364,7 +402,8 @@ again:
|
||||
|
||||
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
|
||||
|
||||
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
|
||||
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long str_va;
|
||||
|
||||
@@ -378,35 +417,32 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
|
||||
__hyp_do_panic(str_va,
|
||||
spsr, elr,
|
||||
read_sysreg(esr_el2), read_sysreg_el2(far),
|
||||
read_sysreg(hpfar_el2), par,
|
||||
(void *)read_sysreg(tpidr_el2));
|
||||
read_sysreg(hpfar_el2), par, vcpu);
|
||||
}
|
||||
|
||||
static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
|
||||
static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
panic(__hyp_panic_string,
|
||||
spsr, elr,
|
||||
read_sysreg_el2(esr), read_sysreg_el2(far),
|
||||
read_sysreg(hpfar_el2), par,
|
||||
(void *)read_sysreg(tpidr_el2));
|
||||
read_sysreg(hpfar_el2), par, vcpu);
|
||||
}
|
||||
|
||||
static hyp_alternate_select(__hyp_call_panic,
|
||||
__hyp_call_panic_nvhe, __hyp_call_panic_vhe,
|
||||
ARM64_HAS_VIRT_HOST_EXTN);
|
||||
|
||||
void __hyp_text __noreturn __hyp_panic(void)
|
||||
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = NULL;
|
||||
|
||||
u64 spsr = read_sysreg_el2(spsr);
|
||||
u64 elr = read_sysreg_el2(elr);
|
||||
u64 par = read_sysreg(par_el1);
|
||||
|
||||
if (read_sysreg(vttbr_el2)) {
|
||||
struct kvm_vcpu *vcpu;
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
|
||||
vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
|
||||
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
|
||||
vcpu = host_ctxt->__hyp_running_vcpu;
|
||||
__timer_save_state(vcpu);
|
||||
__deactivate_traps(vcpu);
|
||||
__deactivate_vm(vcpu);
|
||||
@@ -414,7 +450,7 @@ void __hyp_text __noreturn __hyp_panic(void)
|
||||
}
|
||||
|
||||
/* Call panic for real */
|
||||
__hyp_call_panic()(spsr, elr, par);
|
||||
__hyp_call_panic()(spsr, elr, par, vcpu);
|
||||
|
||||
unreachable();
|
||||
}
|
||||
|
||||
@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
|
||||
/*
|
||||
* Non-VHE: Both host and guest must save everything.
|
||||
*
|
||||
* VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
|
||||
* pstate, and guest must save everything.
|
||||
* VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
|
||||
* and guest must save everything.
|
||||
*/
|
||||
|
||||
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
|
||||
@@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
|
||||
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
|
||||
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
|
||||
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
|
||||
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
|
||||
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
|
||||
ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
|
||||
ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
|
||||
ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
|
||||
}
|
||||
|
||||
static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
|
||||
@@ -62,10 +59,13 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
|
||||
ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
|
||||
ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
|
||||
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
|
||||
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
|
||||
|
||||
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
|
||||
ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
|
||||
ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
|
||||
ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
|
||||
ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
|
||||
}
|
||||
|
||||
static hyp_alternate_select(__sysreg_call_save_host_state,
|
||||
@@ -89,11 +89,8 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
|
||||
write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
|
||||
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
|
||||
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
|
||||
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
|
||||
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
|
||||
write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
|
||||
write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
|
||||
write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
|
||||
}
|
||||
|
||||
static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
|
||||
@@ -115,10 +112,13 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
|
||||
write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
|
||||
write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
|
||||
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
|
||||
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
|
||||
|
||||
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
|
||||
write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
|
||||
write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
|
||||
write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
|
||||
write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
|
||||
}
|
||||
|
||||
static hyp_alternate_select(__sysreg_call_restore_host_state,
|
||||
@@ -183,3 +183,8 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
|
||||
if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
|
||||
write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
|
||||
}
|
||||
|
||||
void __hyp_text __kvm_set_tpidr_el2(u64 tpidr_el2)
|
||||
{
|
||||
asm("msr tpidr_el2, %0": : "r" (tpidr_el2));
|
||||
}
|
||||
|
||||
@@ -135,6 +135,10 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
/* Reset PMU */
|
||||
kvm_pmu_vcpu_reset(vcpu);
|
||||
|
||||
/* Default workaround setup is enabled (if supported) */
|
||||
if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
|
||||
vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
|
||||
|
||||
/* Reset timer */
|
||||
return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/prctl.h>
|
||||
#include <linux/nmi.h>
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <asm/bootinfo.h>
|
||||
@@ -633,28 +634,42 @@ unsigned long arch_align_stack(unsigned long sp)
|
||||
return sp & ALMASK;
|
||||
}
|
||||
|
||||
static void arch_dump_stack(void *info)
|
||||
static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
|
||||
static struct cpumask backtrace_csd_busy;
|
||||
|
||||
static void handle_backtrace(void *info)
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
nmi_cpu_backtrace(get_irq_regs());
|
||||
cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
|
||||
}
|
||||
|
||||
regs = get_irq_regs();
|
||||
static void raise_backtrace(cpumask_t *mask)
|
||||
{
|
||||
struct call_single_data *csd;
|
||||
int cpu;
|
||||
|
||||
if (regs)
|
||||
show_regs(regs);
|
||||
else
|
||||
dump_stack();
|
||||
for_each_cpu(cpu, mask) {
|
||||
/*
|
||||
* If we previously sent an IPI to the target CPU & it hasn't
|
||||
* cleared its bit in the busy cpumask then it didn't handle
|
||||
* our previous IPI & it's not safe for us to reuse the
|
||||
* call_single_data_t.
|
||||
*/
|
||||
if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
|
||||
pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
|
||||
cpu);
|
||||
continue;
|
||||
}
|
||||
|
||||
csd = &per_cpu(backtrace_csd, cpu);
|
||||
csd->func = handle_backtrace;
|
||||
smp_call_function_single_async(cpu, csd);
|
||||
}
|
||||
}
|
||||
|
||||
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
|
||||
{
|
||||
long this_cpu = get_cpu();
|
||||
|
||||
if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
|
||||
dump_stack();
|
||||
|
||||
smp_call_function_many(mask, arch_dump_stack, NULL, 1);
|
||||
|
||||
put_cpu();
|
||||
nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
|
||||
}
|
||||
|
||||
int mips_get_process_fp_mode(struct task_struct *task)
|
||||
|
||||
@@ -45,6 +45,65 @@
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)

#ifndef __x86_64__
/* 32 bit */

#define _ASM_ARG1 _ASM_AX
#define _ASM_ARG2 _ASM_DX
#define _ASM_ARG3 _ASM_CX

#define _ASM_ARG1L eax
#define _ASM_ARG2L edx
#define _ASM_ARG3L ecx

#define _ASM_ARG1W ax
#define _ASM_ARG2W dx
#define _ASM_ARG3W cx

#define _ASM_ARG1B al
#define _ASM_ARG2B dl
#define _ASM_ARG3B cl

#else
/* 64 bit */

#define _ASM_ARG1 _ASM_DI
#define _ASM_ARG2 _ASM_SI
#define _ASM_ARG3 _ASM_DX
#define _ASM_ARG4 _ASM_CX
#define _ASM_ARG5 r8
#define _ASM_ARG6 r9

#define _ASM_ARG1Q rdi
#define _ASM_ARG2Q rsi
#define _ASM_ARG3Q rdx
#define _ASM_ARG4Q rcx
#define _ASM_ARG5Q r8
#define _ASM_ARG6Q r9

#define _ASM_ARG1L edi
#define _ASM_ARG2L esi
#define _ASM_ARG3L edx
#define _ASM_ARG4L ecx
#define _ASM_ARG5L r8d
#define _ASM_ARG6L r9d

#define _ASM_ARG1W di
#define _ASM_ARG2W si
#define _ASM_ARG3W dx
#define _ASM_ARG4W cx
#define _ASM_ARG5W r8w
#define _ASM_ARG6W r9w

#define _ASM_ARG1B dil
#define _ASM_ARG2B sil
#define _ASM_ARG3B dl
#define _ASM_ARG4B cl
#define _ASM_ARG5B r8b
#define _ASM_ARG6B r9b

#endif

/*
* Macros to generate condition code outputs from inline assembly,
* The output operand must be type "bool".
@@ -12,7 +12,7 @@
* Interrupt control:
*/

static inline unsigned long native_save_fl(void)
extern inline unsigned long native_save_fl(void)
{
unsigned long flags;

@@ -56,6 +56,7 @@ obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
obj-y += irqflags.o

obj-y += process.o
obj-y += fpu/
arch/x86/kernel/irqflags.S (new file, 26 lines)
@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <asm/asm.h>
#include <asm/export.h>
#include <linux/linkage.h>

/*
* unsigned long native_save_fl(void)
*/
ENTRY(native_save_fl)
pushf
pop %_ASM_AX
ret
ENDPROC(native_save_fl)
EXPORT_SYMBOL(native_save_fl)

/*
* void native_restore_fl(unsigned long flags)
* %eax/%rdi: flags
*/
ENTRY(native_restore_fl)
push %_ASM_ARG1
popf
ret
ENDPROC(native_restore_fl)
EXPORT_SYMBOL(native_restore_fl)
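The new assembly file above provides the single out-of-line definition of these helpers, while the header change earlier in the series switches the C declaration to gnu89 `extern inline`. A minimal sketch of how the two sides pair up is shown below; it is simplified from the x86 `irqflags.h` change, so treat the exact comment and constraints as illustrative rather than a verbatim copy of the header.

```c
/* With gnu_inline (gnu89) semantics, the inline body below is used when
 * the compiler chooses to inline, and the irqflags.S symbol is the only
 * out-of-line copy for callers that take the function's address
 * (e.g. the paravirt pv_irq_ops.save_fl slot). */
extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
```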
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
pool = array_index_nospec(pool,
ZATM_LAST_POOL + 1);
if (copy_from_user(&info,
&((struct zatm_pool_req __user *) arg)->info,
sizeof(info))) return -EFAULT;

@@ -208,7 +208,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->pdr_pa);
return -ENOMEM;
}
memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
256 * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
@@ -241,13 +241,15 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
if (dev->pdr != NULL)
if (dev->pdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr, dev->pdr_pa);

if (dev->shadow_sa_pool)
dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
dev->shadow_sa_pool, dev->shadow_sa_pool_pa);

if (dev->shadow_sr_pool)
dma_free_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
@@ -417,12 +419,12 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
if (dev->sdr != NULL)
if (dev->sdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
dev->sdr, dev->sdr_pa);

if (dev->scatter_buffer_va != NULL)
if (dev->scatter_buffer_va)
dma_free_coherent(dev->core_dev->device,
dev->scatter_buffer_size * PPC4XX_NUM_SD,
dev->scatter_buffer_va,
@@ -1034,12 +1036,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
break;
}

if (rc) {
list_del(&alg->entry);
if (rc)
kfree(alg);
} else {
else
list_add_tail(&alg->entry, &sec_dev->alg_list);
}
}

return 0;
@@ -1193,7 +1193,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)

rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
goto err_build_gdr;
goto err_build_pdr;

rc = crypto4xx_build_sdr(core_dev->dev);
if (rc)
@@ -1236,12 +1236,11 @@ err_iomap:
err_request_irq:
irq_dispose_mapping(core_dev->irq);
tasklet_kill(&core_dev->tasklet);
crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
err_build_gdr:
crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
kfree(core_dev);
@@ -172,7 +172,8 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len,

t[1].rx_buf = buf;
t[1].rx_nbits = m25p80_rx_nbits(nor);
t[1].len = min(len, spi_max_transfer_size(spi));
t[1].len = min3(len, spi_max_transfer_size(spi),
spi_max_message_size(spi) - t[0].len);
spi_message_add_tail(&t[1], &m);

ret = spi_sync(spi, &m);

@@ -1063,7 +1063,8 @@ static int bcm_enet_open(struct net_device *dev)
val = enet_readl(priv, ENET_CTL_REG);
val |= ENET_CTL_ENABLE_MASK;
enet_writel(priv, val, ENET_CTL_REG);
enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
if (priv->dma_has_sram)
enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
enet_dmac_writel(priv, priv->dma_chan_en_mask,
ENETDMAC_CHANCFG, priv->rx_chan);

@@ -1790,7 +1791,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
ret = PTR_ERR(priv->mac_clk);
goto out;
}
clk_prepare_enable(priv->mac_clk);
ret = clk_prepare_enable(priv->mac_clk);
if (ret)
goto out_put_clk_mac;

/* initialize default and fetch platform data */
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1822,9 +1825,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (IS_ERR(priv->phy_clk)) {
ret = PTR_ERR(priv->phy_clk);
priv->phy_clk = NULL;
goto out_put_clk_mac;
goto out_disable_clk_mac;
}
clk_prepare_enable(priv->phy_clk);
ret = clk_prepare_enable(priv->phy_clk);
if (ret)
goto out_put_clk_phy;
}

/* do minimal hardware init to be able to probe mii bus */
@@ -1915,13 +1920,16 @@ out_free_mdio:
out_uninit_hw:
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
if (priv->phy_clk) {
if (priv->phy_clk)
clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}

out_put_clk_mac:
out_put_clk_phy:
if (priv->phy_clk)
clk_put(priv->phy_clk);

out_disable_clk_mac:
clk_disable_unprepare(priv->mac_clk);
out_put_clk_mac:
clk_put(priv->mac_clk);
out:
free_netdev(dev);
@@ -2766,7 +2774,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
ret = PTR_ERR(priv->mac_clk);
goto out_unmap;
}
clk_enable(priv->mac_clk);
ret = clk_prepare_enable(priv->mac_clk);
if (ret)
goto out_put_clk;

priv->rx_chan = 0;
priv->tx_chan = 1;
@@ -2787,7 +2797,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)

ret = register_netdev(dev);
if (ret)
goto out_put_clk;
goto out_disable_clk;

netif_carrier_off(dev);
platform_set_drvdata(pdev, dev);
@@ -2796,6 +2806,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)

return 0;

out_disable_clk:
clk_disable_unprepare(priv->mac_clk);

out_put_clk:
clk_put(priv->mac_clk);

@@ -2827,6 +2840,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));

clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);

free_netdev(dev);
return 0;
}
@@ -50,6 +50,7 @@
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <asm/uaccess.h>

#include "common.h"
@@ -2259,6 +2260,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)

if (t.qset_idx >= nqsets)
return -EINVAL;
t.qset_idx = array_index_nospec(t.qset_idx, nqsets);

q = &adapter->params.sge.qset[q1 + t.qset_idx];
t.rspq_size = q->rspq_size;

@@ -784,6 +784,7 @@ static void cmd_work_handler(struct work_struct *work)
struct semaphore *sem;
unsigned long flags;
int alloc_ret;
int cmd_mode;

sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -830,6 +831,7 @@ static void cmd_work_handler(struct work_struct *work)
set_signature(ent, !cmd->checksum_disabled);
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
cmd_mode = cmd->mode;

if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -854,7 +856,7 @@ static void cmd_work_handler(struct work_struct *work)
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
mmiowb();
/* if not in polling don't use ent after this point */
if (cmd->mode == CMD_MODE_POLLING) {
if (cmd_mode == CMD_MODE_POLLING) {
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
@@ -1256,7 +1258,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
{
struct mlx5_core_dev *dev = filp->private_data;
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
char outlen_str[8];
char outlen_str[8] = {0};
int outlen;
void *ptr;
int err;
@@ -1271,8 +1273,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
if (copy_from_user(outlen_str, buf, count))
return -EFAULT;

outlen_str[7] = 0;

err = sscanf(outlen_str, "%d", &outlen);
if (err < 0)
return err;

@@ -575,7 +575,7 @@ EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
{
u32 out[MLX5_ST_SZ_DW(qtct_reg)];
u32 out[MLX5_ST_SZ_DW(qetc_reg)];

if (!MLX5_CAP_GEN(mdev, ets))
return -ENOTSUPP;
@@ -587,7 +587,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
int outlen)
{
u32 in[MLX5_ST_SZ_DW(qtct_reg)];
u32 in[MLX5_ST_SZ_DW(qetc_reg)];

if (!MLX5_CAP_GEN(mdev, ets))
return -ENOTSUPP;

@@ -677,9 +677,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];

memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
ARRAY_SIZE(p_local->local_chassis_id));
sizeof(p_local->local_chassis_id));
memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
ARRAY_SIZE(p_local->local_port_id));
sizeof(p_local->local_port_id));
}

static void
@@ -692,9 +692,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];

memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
ARRAY_SIZE(p_remote->peer_chassis_id));
sizeof(p_remote->peer_chassis_id));
memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
ARRAY_SIZE(p_remote->peer_port_id));
sizeof(p_remote->peer_port_id));
}

static int

@@ -23,6 +23,7 @@
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <linux/crash_dump.h>

#include "qed.h"
#include "qed_sriov.h"
@@ -701,6 +702,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
/* We want a minimum of one slowpath and one fastpath vector per hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

if (is_kdump_kernel()) {
DP_INFO(cdev,
"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
cdev->int_params.in.min_msix_cnt);
cdev->int_params.in.num_vectors =
cdev->int_params.in.min_msix_cnt;
}

rc = qed_set_int_mode(cdev, false);
if (rc) {
DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
@@ -59,8 +59,7 @@
#include <linux/sungem_phy.h>
#include "sungem.h"

/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS
#define STRIP_FCS

#define DEFAULT_MSG (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
@@ -434,7 +433,7 @@ static int gem_rxmac_reset(struct gem *gp)
writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
(ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
writel(val, gp->regs + RXDMA_CFG);
if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -759,7 +758,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
struct net_device *dev = gp->dev;
int entry, drops, work_done = 0;
u32 done;
__sum16 csum;

if (netif_msg_rx_status(gp))
printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -854,9 +852,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
skb = copy_skb;
}

csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
skb->csum = csum_unfold(csum);
skb->ip_summed = CHECKSUM_COMPLETE;
if (likely(dev->features & NETIF_F_RXCSUM)) {
__sum16 csum;

csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
skb->csum = csum_unfold(csum);
skb->ip_summed = CHECKSUM_COMPLETE;
}
skb->protocol = eth_type_trans(skb, gp->dev);

napi_gro_receive(&gp->napi, skb);
@@ -1754,7 +1756,7 @@ static void gem_init_dma(struct gem *gp)
writel(0, gp->regs + TXDMA_KICK);

val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
(ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
writel(val, gp->regs + RXDMA_CFG);

writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2972,8 +2974,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);

/* We can do scatter/gather and HW checksum */
dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= dev->hw_features | NETIF_F_RXCSUM;
dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->features = dev->hw_features;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
@@ -525,7 +525,8 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
ipvlan->dev = dev;
ipvlan->port = port;
ipvlan->sfeatures = IPVLAN_FEATURES;
ipvlan_adjust_mtu(ipvlan, phy_dev);
if (!tb[IFLA_MTU])
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);

/* TODO Probably put random address here to be presented to the

@@ -2964,6 +2964,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
pkt_cnt = 0;
count = 0;
length = 0;
spin_lock_irqsave(&tqp->lock, flags);
for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
if (skb_is_gso(skb)) {
if (pkt_cnt) {
@@ -2972,7 +2973,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
}
count = 1;
length = skb->len - TX_OVERHEAD;
skb2 = skb_dequeue(tqp);
__skb_unlink(skb, tqp);
spin_unlock_irqrestore(&tqp->lock, flags);
goto gso_skb;
}

@@ -2981,6 +2983,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
pkt_cnt++;
}
spin_unlock_irqrestore(&tqp->lock, flags);

/* copy to a single skb */
skb = alloc_skb(skb_totallen, GFP_ATOMIC);

@@ -946,6 +946,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */

@@ -3327,7 +3327,8 @@ static int rtl8152_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&tp->pm_notifier);
#endif
napi_disable(&tp->napi);
if (!test_bit(RTL8152_UNPLUG, &tp->flags))
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);

@@ -131,7 +131,6 @@ found_alt:
firmware->size);
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
rtlpriv->rtlhal.fwsize = firmware->size;
release_firmware(firmware);
}

@@ -1052,7 +1052,8 @@ err_used:
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
sockfd_put(sock);
if (sock)
sockfd_put(sock);
err_vq:
mutex_unlock(&vq->mutex);
err:
@@ -134,6 +134,19 @@ bail:
return err;
}

static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int ret = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);

down_read(&oi->ip_alloc_sem);
ret = ocfs2_get_block(inode, iblock, bh_result, create);
up_read(&oi->ip_alloc_sem);

return ret;
}

int ocfs2_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
@@ -2120,7 +2133,7 @@ static void ocfs2_dio_free_write_ctx(struct inode *inode,
* called like this: dio->get_blocks(dio->inode, fs_startblk,
* fs_count, map_bh, dio->rw == WRITE);
*/
static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -2146,12 +2159,9 @@ static int ocfs2_dio_get_block(struct inode *inode, sector_t iblock,
* while file size will be changed.
*/
if (pos + total_len <= i_size_read(inode)) {
down_read(&oi->ip_alloc_sem);

/* This is the fast path for re-write. */
ret = ocfs2_get_block(inode, iblock, bh_result, create);

up_read(&oi->ip_alloc_sem);

ret = ocfs2_lock_get_block(inode, iblock, bh_result, create);
if (buffer_mapped(bh_result) &&
!buffer_new(bh_result) &&
ret == 0)
@@ -2416,9 +2426,9 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
return 0;

if (iov_iter_rw(iter) == READ)
get_block = ocfs2_get_block;
get_block = ocfs2_lock_get_block;
else
get_block = ocfs2_dio_get_block;
get_block = ocfs2_dio_wr_get_block;

return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
iter, get_block,

@@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
"panic", /* O2NM_FENCE_PANIC */
};

static inline void o2nm_lock_subsystem(void);
static inline void o2nm_unlock_subsystem(void);

struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
struct o2nm_node *node = NULL;
@@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
/* through the first node_set .parent
* mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
if (node->nd_item.ci_parent)
return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
else
return NULL;
}

enum {
@@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
int ret = 0;
@@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */

o2nm_lock_subsystem();
cluster = to_o2nm_cluster_from_node(node);
if (!cluster) {
o2nm_unlock_subsystem();
return -EINVAL;
}

write_lock(&cluster->cl_nodes_lock);
if (cluster->cl_nodes[tmp])
ret = -EEXIST;
@@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
set_bit(tmp, cluster->cl_nodes_bitmap);
}
write_unlock(&cluster->cl_nodes_lock);
o2nm_unlock_subsystem();

if (ret)
return ret;

@@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
struct o2nm_cluster *cluster;
int ret, i;
struct rb_node **p, *parent;
unsigned int octets[4];
@@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
}

o2nm_lock_subsystem();
cluster = to_o2nm_cluster_from_node(node);
if (!cluster) {
o2nm_unlock_subsystem();
return -EINVAL;
}

ret = 0;
write_lock(&cluster->cl_nodes_lock);
if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
@@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
}
write_unlock(&cluster->cl_nodes_lock);
o2nm_unlock_subsystem();

if (ret)
return ret;

@@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
ssize_t ret;
@@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */

o2nm_lock_subsystem();
cluster = to_o2nm_cluster_from_node(node);
if (!cluster) {
ret = -EINVAL;
goto out;
}

/* the only failure case is trying to set a new local node
* when a different one is already set */
if (tmp && tmp == cluster->cl_has_local &&
cluster->cl_local_node != node->nd_num)
return -EBUSY;
cluster->cl_local_node != node->nd_num) {
ret = -EBUSY;
goto out;
}

/* bring up the rx thread if we're setting the new local node. */
if (tmp && !cluster->cl_has_local) {
ret = o2net_start_listening(node);
if (ret)
return ret;
goto out;
}

if (!tmp && cluster->cl_has_local &&
@@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
cluster->cl_local_node = node->nd_num;
}

return count;
ret = count;

out:
o2nm_unlock_subsystem();
return ret;
}

CONFIGFS_ATTR(o2nm_node_, num);
@@ -738,6 +775,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
},
};

static inline void o2nm_lock_subsystem(void)
{
mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
}

static inline void o2nm_unlock_subsystem(void)
{
mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
}

int o2nm_depend_item(struct config_item *item)
{
return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
@@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key)
}

/* %k */
static void sprintf_le_key(char *buf, struct reiserfs_key *key)
static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
{
if (key)
sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
le32_to_cpu(key->k_objectid), le_offset(key),
le_type(key));
return scnprintf(buf, size, "[%d %d %s %s]",
le32_to_cpu(key->k_dir_id),
le32_to_cpu(key->k_objectid), le_offset(key),
le_type(key));
else
sprintf(buf, "[NULL]");
return scnprintf(buf, size, "[NULL]");
}

/* %K */
static void sprintf_cpu_key(char *buf, struct cpu_key *key)
static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
{
if (key)
sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
cpu_type(key));
return scnprintf(buf, size, "[%d %d %s %s]",
key->on_disk_key.k_dir_id,
key->on_disk_key.k_objectid,
reiserfs_cpu_offset(key), cpu_type(key));
else
sprintf(buf, "[NULL]");
return scnprintf(buf, size, "[NULL]");
}

static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
static int scnprintf_de_head(char *buf, size_t size,
struct reiserfs_de_head *deh)
{
if (deh)
sprintf(buf,
"[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
deh_location(deh), deh_state(deh));
return scnprintf(buf, size,
"[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
deh_offset(deh), deh_dir_id(deh),
deh_objectid(deh), deh_location(deh),
deh_state(deh));
else
sprintf(buf, "[NULL]");
return scnprintf(buf, size, "[NULL]");

}

static void sprintf_item_head(char *buf, struct item_head *ih)
static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
{
if (ih) {
strcpy(buf,
(ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
"free_space(entry_count) %d",
ih_item_len(ih), ih_location(ih), ih_free_space(ih));
char *p = buf;
char * const end = buf + size;

p += scnprintf(p, end - p, "%s",
(ih_version(ih) == KEY_FORMAT_3_6) ?
"*3.6* " : "*3.5*");

p += scnprintf_le_key(p, end - p, &ih->ih_key);

p += scnprintf(p, end - p,
", item_len %d, item_location %d, free_space(entry_count) %d",
ih_item_len(ih), ih_location(ih),
ih_free_space(ih));
return p - buf;
} else
sprintf(buf, "[NULL]");
return scnprintf(buf, size, "[NULL]");
}

static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
static int scnprintf_direntry(char *buf, size_t size,
struct reiserfs_dir_entry *de)
{
char name[20];

memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
return scnprintf(buf, size, "\"%s\"==>[%d %d]",
name, de->de_dir_id, de->de_objectid);
}

static void sprintf_block_head(char *buf, struct buffer_head *bh)
static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
{
sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
return scnprintf(buf, size,
"level=%d, nr_items=%d, free_space=%d rdkey ",
B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
}

static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
{
sprintf(buf,
"dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
bh->b_bdev, bh->b_size,
(unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
bh->b_state, bh->b_page,
buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
buffer_dirty(bh) ? "DIRTY" : "CLEAN",
buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
return scnprintf(buf, size,
"dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
bh->b_bdev, bh->b_size,
(unsigned long long)bh->b_blocknr,
atomic_read(&(bh->b_count)),
bh->b_state, bh->b_page,
buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
buffer_dirty(bh) ? "DIRTY" : "CLEAN",
buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
}

static void sprintf_disk_child(char *buf, struct disk_child *dc)
static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
{
sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
dc_size(dc));
return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
dc_block_number(dc), dc_size(dc));
}

static char *is_there_reiserfs_struct(char *fmt, int *what)
@@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args)
char *fmt1 = fmt_buf;
char *k;
char *p = error_buf;
char * const end = &error_buf[sizeof(error_buf)];
int what;

spin_lock(&error_lock);

strcpy(fmt1, fmt);
if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
strscpy(error_buf, "format string too long", end - error_buf);
goto out_unlock;
}

while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
*k = 0;

p += vsprintf(p, fmt1, args);
p += vscnprintf(p, end - p, fmt1, args);

switch (what) {
case 'k':
sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
p += scnprintf_le_key(p, end - p,
va_arg(args, struct reiserfs_key *));
break;
case 'K':
sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
p += scnprintf_cpu_key(p, end - p,
va_arg(args, struct cpu_key *));
break;
case 'h':
sprintf_item_head(p, va_arg(args, struct item_head *));
p += scnprintf_item_head(p, end - p,
va_arg(args, struct item_head *));
break;
case 't':
sprintf_direntry(p,
va_arg(args,
struct reiserfs_dir_entry *));
p += scnprintf_direntry(p, end - p,
va_arg(args, struct reiserfs_dir_entry *));
break;
case 'y':
sprintf_disk_child(p,
va_arg(args, struct disk_child *));
p += scnprintf_disk_child(p, end - p,
va_arg(args, struct disk_child *));
break;
case 'z':
sprintf_block_head(p,
va_arg(args, struct buffer_head *));
p += scnprintf_block_head(p, end - p,
va_arg(args, struct buffer_head *));
break;
case 'b':
sprintf_buffer_head(p,
va_arg(args, struct buffer_head *));
p += scnprintf_buffer_head(p, end - p,
va_arg(args, struct buffer_head *));
break;
case 'a':
sprintf_de_head(p,
va_arg(args,
struct reiserfs_de_head *));
p += scnprintf_de_head(p, end - p,
va_arg(args, struct reiserfs_de_head *));
break;
}

p += strlen(p);
fmt1 = k + 2;
}
vsprintf(p, fmt1, args);
p += vscnprintf(p, end - p, fmt1, args);
out_unlock:
spin_unlock(&error_lock);

}
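The reiserfs conversion above works because `scnprintf()` returns the number of characters actually written (excluding the trailing NUL) and never writes past the size it is given, so successive appends can be chained without overflowing the error buffer. A minimal sketch of that bounded-append pattern, using a hypothetical helper for illustration:

```c
/* Chained, bounded appends: each call advances p by what was really
 * written, and (end - p) shrinks accordingly, so the buffer can never
 * be overrun even if the formatted text is longer than 'size'. */
static size_t append_twice(char *buf, size_t size)
{
	char *p = buf;
	char * const end = buf + size;

	p += scnprintf(p, end - p, "first=%d ", 1);
	p += scnprintf(p, end - p, "second=%d", 2);

	return p - buf;	/* length of the (possibly truncated) result */
}
```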
@@ -80,6 +80,11 @@
ARM_SMCCC_SMC_32, \
0, 0x8000)

#define ARM_SMCCC_ARCH_WORKAROUND_2 \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_32, \
0, 0x7fff)

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
@@ -293,5 +298,10 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)

/* Return codes defined in ARM DEN 0070A */
#define SMCCC_RET_SUCCESS 0
#define SMCCC_RET_NOT_SUPPORTED -1
#define SMCCC_RET_NOT_REQUIRED -2

#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/

@@ -64,6 +64,18 @@
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif

/*
* Feature detection for gnu_inline (gnu89 extern inline semantics). Either
* __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
* and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
* defined so the gnu89 semantics are the default.
*/
#ifdef __GNUC_STDC_INLINE__
# define __gnu_inline __attribute__((gnu_inline))
#else
# define __gnu_inline
#endif

/*
* Force always-inline if the user requests it so via the .config,
* or if gcc is too old.
@@ -71,19 +83,22 @@
* -Wunused-function. This turns out to avoid the need for complex #ifdef
* directives. Suppress the warning in clang as well by using "unused"
* function attribute, which is redundant but not harmful for gcc.
* Prefer gnu_inline, so that extern inline functions do not emit an
* externally visible function. This makes extern inline behave as per gnu89
* semantics rather than c99. This prevents multiple symbol definition errors
* of extern inline functions at link time.
* A lot of inline functions can cause havoc with function tracing.
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
#define inline inline __attribute__((always_inline,unused)) notrace
#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
#define __inline __inline __attribute__((always_inline,unused)) notrace
#define inline \
inline __attribute__((always_inline, unused)) notrace __gnu_inline
#else
/* A lot of inline functions can cause havoc with function tracing */
#define inline inline __attribute__((unused)) notrace
#define __inline__ __inline__ __attribute__((unused)) notrace
#define __inline __inline __attribute__((unused)) notrace
#define inline inline __attribute__((unused)) notrace __gnu_inline
#endif

#define __inline__ inline
#define __inline inline
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))
@@ -27,7 +27,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
ssize_t __must_check strscpy(char *, const char *, size_t);
ssize_t strscpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);

@@ -406,6 +406,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
if (IS_ERR(watcher))
return PTR_ERR(watcher);

if (watcher->family != NFPROTO_BRIDGE) {
module_put(watcher->me);
return -ENOENT;
}

w->u.watcher = watcher;

par->target = watcher;
@@ -727,6 +733,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
goto cleanup_watchers;
}

/* Reject UNSPEC, xtables verdicts/return values are incompatible */
if (target->family != NFPROTO_BRIDGE) {
module_put(target->me);
ret = -ENOENT;
goto cleanup_watchers;
}

t->u.target = target;
if (t->u.target == &ebt_standard_target) {
if (gap < sizeof(struct ebt_standard_target)) {

@@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
ktime_t now = ktime_get_real();
ktime_t now = ktime_get();
s64 delta = 0;

switch (fbtype) {
@@ -624,15 +624,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
case CCID3_FBACK_PERIODIC:
delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
if (delta <= 0)
DCCP_BUG("delta (%ld) <= 0", (long)delta);
else
hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
delta = 1;
hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
break;
default:
return;
}

ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
hc->rx_x_recv, hc->rx_pinv);

hc->rx_tstamp_last_feedback = now;
@@ -679,7 +678,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
static u32 ccid3_first_li(struct sock *sk)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
u32 x_recv, p, delta;
u32 x_recv, p;
s64 delta;
u64 fval;

if (hc->rx_rtt == 0) {
@@ -687,7 +687,9 @@ static u32 ccid3_first_li(struct sock *sk)
hc->rx_rtt = DCCP_FALLBACK_RTT;
}

delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
if (delta <= 0)
delta = 1;
x_recv = scaled_div32(hc->rx_bytes_recv, delta);
if (x_recv == 0) { /* would also trigger divide-by-zero */
DCCP_WARN("X_recv==0\n");

@@ -87,35 +87,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
opt++;
kdebug("options: '%s'", opt);
do {
int opt_len, opt_nlen;
const char *eq;
int opt_len, opt_nlen, opt_vlen, tmp;
char optval[128];

next_opt = memchr(opt, '#', end - opt) ?: end;
opt_len = next_opt - opt;
if (opt_len <= 0 || opt_len > 128) {
if (opt_len <= 0 || opt_len > sizeof(optval)) {
pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
opt_len);
return -EINVAL;
}

eq = memchr(opt, '=', opt_len) ?: end;
opt_nlen = eq - opt;
eq++;
opt_vlen = next_opt - eq; /* will be -1 if no value */
eq = memchr(opt, '=', opt_len);
if (eq) {
opt_nlen = eq - opt;
eq++;
memcpy(optval, eq, next_opt - eq);
optval[next_opt - eq] = '\0';
} else {
opt_nlen = opt_len;
optval[0] = '\0';
}

tmp = opt_vlen >= 0 ? opt_vlen : 0;
kdebug("option '%*.*s' val '%*.*s'",
opt_nlen, opt_nlen, opt, tmp, tmp, eq);
kdebug("option '%*.*s' val '%s'",
opt_nlen, opt_nlen, opt, optval);

/* see if it's an error number representing a DNS error
* that's to be recorded as the result in this key */
if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
kdebug("dns error number option");
if (opt_vlen <= 0)
goto bad_option_value;

ret = kstrtoul(eq, 10, &derrno);
ret = kstrtoul(optval, 10, &derrno);
if (ret < 0)
goto bad_option_value;
|
||||
{
|
||||
struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
|
||||
struct tcp_fastopen_context *ctxt;
|
||||
int ret;
|
||||
u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
|
||||
__le32 key[4];
|
||||
int ret, i;
|
||||
|
||||
tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
|
||||
if (!tbl.data)
|
||||
@@ -237,11 +238,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
|
||||
rcu_read_lock();
|
||||
ctxt = rcu_dereference(tcp_fastopen_ctx);
|
||||
if (ctxt)
|
||||
memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
|
||||
memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
|
||||
else
|
||||
memset(user_key, 0, sizeof(user_key));
|
||||
memset(key, 0, sizeof(key));
|
||||
rcu_read_unlock();
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(key); i++)
|
||||
user_key[i] = le32_to_cpu(key[i]);
|
||||
|
||||
snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
|
||||
user_key[0], user_key[1], user_key[2], user_key[3]);
|
||||
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
|
||||
@@ -257,12 +261,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
|
||||
* first invocation of tcp_fastopen_cookie_gen
|
||||
*/
|
||||
tcp_fastopen_init_key_once(false);
|
||||
tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(user_key); i++)
|
||||
key[i] = cpu_to_le32(user_key[i]);
|
||||
|
||||
tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
|
||||
}
|
||||
|
||||
bad_key:
|
||||
pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
|
||||
user_key[0], user_key[1], user_key[2], user_key[3],
|
||||
user_key[0], user_key[1], user_key[2], user_key[3],
|
||||
(char *)tbl.data, ret);
|
||||
kfree(tbl.data);
|
||||
return ret;
|
||||
|
||||
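The Fast Open fix above keeps the key handed to the cipher in `__le32` and converts to host order only for the `%08x-%08x-%08x-%08x` sysctl text, so the actual key bytes are identical on big- and little-endian machines. A minimal sketch of that round trip, with hypothetical helper names chosen just for illustration:

```c
/* Display side: little-endian key words -> host-order words for "%08x". */
static void key_to_text_words(const __le32 key[4], u32 words[4])
{
	int i;

	for (i = 0; i < 4; i++)
		words[i] = le32_to_cpu(key[i]);
}

/* Store side: host-order words parsed from the sysctl string -> the
 * little-endian layout fed to tcp_fastopen_reset_cipher(). */
static void text_words_to_key(const u32 words[4], __le32 key[4])
{
	int i;

	for (i = 0; i < 4; i++)
		key[i] = cpu_to_le32(words[i]);
}
```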
@@ -3237,6 +3237,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,

if (tcp_is_reno(tp)) {
tcp_remove_reno_sacks(sk, pkts_acked);

/* If any of the cumulatively ACKed segments was
* retransmitted, non-SACK case cannot confirm that
* progress was due to original transmission due to
* lack of TCPCB_SACKED_ACKED bits even if some of
* the packets may have been never retransmitted.
*/
if (flag & FLAG_RETRANS_DATA_ACKED)
flag &= ~FLAG_ORIG_SACK_ACKED;
} else {
int delta;

@@ -618,6 +618,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
fq->q.meat == fq->q.len &&
nf_ct_frag6_reasm(fq, skb, dev))
ret = 0;
else
skb_dst_drop(skb);

out_unlock:
spin_unlock_bh(&fq->q.lock);

@@ -753,11 +753,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
pr_debug("Fragment %zd bytes remaining %zd",
frag_len, remaining_len);

pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
frag_len + LLCP_HEADER_SIZE, &err);
if (pdu == NULL) {
pr_err("Could not allocate PDU\n");
continue;
pr_err("Could not allocate PDU (error=%d)\n", err);
len -= remaining_len;
if (len == 0)
len = err;
break;
}

pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);

@@ -2265,6 +2265,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (po->stats.stats1.tp_drops)
status |= TP_STATUS_LOSING;
}

if (do_vnet &&
__packet_rcv_vnet(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr)))
goto drop_n_account;

po->stats.stats1.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
@@ -2272,14 +2278,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
spin_unlock(&sk->sk_receive_queue.lock);

if (do_vnet) {
if (__packet_rcv_vnet(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr))) {
spin_lock(&sk->sk_receive_queue.lock);
goto drop_n_account;
}
}

skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))

@@ -193,4 +193,5 @@ struct rds_transport rds_loop_transport = {
.inc_copy_to_user = rds_message_inc_copy_to_user,
.inc_free = rds_loop_inc_free,
.t_name = "loopback",
.t_type = RDS_TRANS_LOOP,
};

@@ -440,6 +440,11 @@ struct rds_notifier {
int n_status;
};

/* Available as part of RDS core, so doesn't need to participate
* in get_preferred transport etc
*/
#define RDS_TRANS_LOOP 3

/**
* struct rds_transport - transport specific behavioural hooks
*

@@ -94,6 +94,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
return;

rs->rs_rcv_bytes += delta;

/* loop transport doesn't send/recv congestion updates */
if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
return;

now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);

rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "

@@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
qdisc_drop(skb, sch, to_free);
return NET_XMIT_SUCCESS;
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)

@@ -203,7 +203,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
return -1;

rd = kvm_vcpu_dabt_get_rd(vcpu);
addr = kern_hyp_va((kern_hyp_va(&kvm_vgic_global_state))->vcpu_base_va);
addr = kern_hyp_va(hyp_symbol_addr(kvm_vgic_global_state)->vcpu_base_va);
addr += fault_ipa - vgic->vgic_cpu_base;

if (kvm_vcpu_dabt_iswrite(vcpu)) {