Merge 4.9.78 into android-4.9-o
Changes in 4.9.78
    libnvdimm, btt: Fix an incompatibility in the log layout
    scsi: sg: disable SET_FORCE_LOW_DMA
    futex: Prevent overflow by strengthen input validation
    ALSA: seq: Make ioctls race-free
    ALSA: pcm: Remove yet superfluous WARN_ON()
    ALSA: hda - Apply headphone noise quirk for another Dell XPS 13 variant
    ALSA: hda - Apply the existing quirk to iMac 14,1
    timers: Unconditionally check deferrable base
    af_key: fix buffer overread in verify_address_len()
    af_key: fix buffer overread in parse_exthdrs()
    iser-target: Fix possible use-after-free in connection establishment error
    scsi: hpsa: fix volume offline state
    sched/deadline: Zero out positive runtime after throttling constrained tasks
    x86/retpoline: Fill RSB on context switch for affected CPUs
    x86/retpoline: Add LFENCE to the retpoline/RSB filling RSB macros
    objtool: Improve error message for bad file argument
    x86/cpufeature: Move processor tracing out of scattered features
    module: Add retpoline tag to VERMAGIC
    x86/mm/pkeys: Fix fill_sig_info_pkey
    x86/tsc: Fix erroneous TSC rate on Skylake Xeon
    pipe: avoid round_pipe_size() nr_pages overflow on 32-bit
    x86/apic/vector: Fix off by one in error path
    perf tools: Fix build with ARCH=x86_64
    Input: ALPS - fix multi-touch decoding on SS4 plus touchpads
    Input: 88pm860x-ts - fix child-node lookup
    Input: twl6040-vibra - fix child-node lookup
    Input: twl4030-vibra - fix sibling-node lookup
    tracing: Fix converting enum's from the map in trace_event_eval_update()
    phy: work around 'phys' references to usb-nop-xceiv devices
    ARM: sunxi_defconfig: Enable CMA
    ARM: dts: kirkwood: fix pin-muxing of MPP7 on OpenBlocks A7
    can: peak: fix potential bug in packet fragmentation
    scripts/gdb/linux/tasks.py: fix get_thread_info
    proc: fix coredump vs read /proc/*/stat race
    libata: apply MAX_SEC_1024 to all LITEON EP1 series devices
    workqueue: avoid hard lockups in show_workqueue_state()
    dm btree: fix serious bug in btree_split_beneath()
    dm thin metadata: THIN_MAX_CONCURRENT_LOCKS should be 6
    arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
    x86/cpu, x86/pti: Do not enable PTI on AMD processors
    usbip: fix warning in vhci_hcd_probe/lockdep_init_map
    x86/mce: Make machine check speculation protected
    retpoline: Introduce start/end markers of indirect thunk
    kprobes/x86: Blacklist indirect thunk functions for kprobes
    kprobes/x86: Disable optimizing on the function jumps to indirect thunk
    x86/pti: Document fix wrong index
    x86/retpoline: Optimize inline assembler for vmexit_fill_RSB
    MIPS: AR7: ensure the port type's FCR value is used
    Linux 4.9.78

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -78,7 +78,7 @@ this protection comes at a cost:
non-PTI SYSCALL entry code, so requires mapping fewer
things into the userspace page tables. The downside is
that stacks must be switched at entry time.
d. Global pages are disabled for all kernel structures not
c. Global pages are disabled for all kernel structures not
mapped into both kernel and userspace page tables. This
feature of the MMU allows different processes to share TLB
entries mapping the kernel. Losing the feature means more

Makefile | 2
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 77
SUBLEVEL = 78
EXTRAVERSION =
NAME = Roaring Lionus

@@ -53,7 +53,8 @@
};

pinctrl: pin-controller@10000 {
pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
&pmx_gpio_header_gpo>;
pinctrl-names = "default";

pmx_uart0: pmx-uart0 {
@@ -85,11 +86,16 @@
* ground.
*/
pmx_gpio_header: pmx-gpio-header {
marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
marvell,pins = "mpp17", "mpp29", "mpp28",
"mpp35", "mpp34", "mpp40";
marvell,function = "gpio";
};

pmx_gpio_header_gpo: pxm-gpio-header-gpo {
marvell,pins = "mpp7";
marvell,function = "gpo";
};

pmx_gpio_init: pmx-init {
marvell,pins = "mpp38";
marvell,function = "gpio";

@@ -11,6 +11,7 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=8
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_CMA=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CPU_FREQ=y
@@ -35,6 +36,7 @@ CONFIG_CAN_SUN4I=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_AHCI_SUNXI=y

@@ -44,7 +44,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

ret = kvm_psci_call(vcpu);
if (ret < 0) {
kvm_inject_undefined(vcpu);
vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
}

@@ -53,7 +53,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
kvm_inject_undefined(vcpu);
vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
}

@@ -576,7 +576,7 @@ static int __init ar7_register_uarts(void)
uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
uart_port.flags = UPF_FIXED_TYPE;
uart_port.flags = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
uart_port.regshift = 2;

uart_port.line = 0;

@@ -229,6 +229,17 @@ ENTRY(__switch_to_asm)
movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
* with userspace addresses. On CPUs where those concerns
* exist, overwrite the RSB with entries which capture
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

/* restore callee-saved registers */
popl %esi
popl %edi

@@ -427,6 +427,17 @@ ENTRY(__switch_to_asm)
movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
* with userspace addresses. On CPUs where those concerns
* exist, overwrite the RSB with entries which capture
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

/* restore callee-saved registers */
popq %r15
popq %r14
@@ -1053,7 +1064,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
idtentry machine_check do_mce has_error_code=0 paranoid=1
#endif

/*

@@ -197,9 +197,9 @@
#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */

#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */

/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
@@ -235,6 +235,7 @@
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */

@@ -11,7 +11,7 @@
* Fill the CPU return stack buffer.
*
* Each entry in the RSB, if used for a speculative 'ret', contains an
* infinite 'pause; jmp' loop to capture speculative execution.
* infinite 'pause; lfence; jmp' loop to capture speculative execution.
*
* This is required in various cases for retpoline and IBRS-based
* mitigations for the Spectre variant 2 vulnerability. Sometimes to
@@ -38,11 +38,13 @@
call 772f; \
773: /* speculation trap */ \
pause; \
lfence; \
jmp 773b; \
772: \
call 774f; \
775: /* speculation trap */ \
pause; \
lfence; \
jmp 775b; \
774: \
dec reg; \
@@ -73,6 +75,7 @@
call .Ldo_rop_\@
.Lspec_trap_\@:
pause
lfence
jmp .Lspec_trap_\@
.Ldo_rop_\@:
mov \reg, (%_ASM_SP)
@@ -165,6 +168,7 @@
" .align 16\n" \
"901: call 903f;\n" \
"902: pause;\n" \
" lfence;\n" \
" jmp 902b;\n" \
" .align 16\n" \
"903: addl $4, %%esp;\n" \
@@ -190,6 +194,9 @@ enum spectre_v2_mitigation {
SPECTRE_V2_IBRS,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
* On VMEXIT we must ensure that no RSB predictions learned in the guest
* can be followed in the host, by overwriting the RSB completely. Both
@@ -199,16 +206,17 @@ enum spectre_v2_mitigation {
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
unsigned long loops = RSB_CLEAR_LOOPS / 2;
unsigned long loops;

asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
ALTERNATIVE("jmp 910f",
__stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
X86_FEATURE_RETPOLINE)
"910:"
: "=&r" (loops), ASM_CALL_CONSTRAINT
: "r" (loops) : "memory" );
: "=r" (loops), ASM_CALL_CONSTRAINT
: : "memory" );
#endif
}

#endif /* __ASSEMBLY__ */
#endif /* __NOSPEC_BRANCH_H__ */

@@ -92,6 +92,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *, long);
#endif
dotraplinkage void do_mce(struct pt_regs *, long);

static inline int get_si_code(unsigned long condition)
{

@@ -361,14 +361,17 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
err = assign_irq_vector_policy(virq + i, node, data, info);
if (err)
if (err) {
irq_data->chip_data = NULL;
free_apic_chip_data(data);
goto error;
}
}

return 0;

error:
x86_vector_free_irqs(domain, virq, i + 1);
x86_vector_free_irqs(domain, virq, i);
return err;
}

@@ -22,6 +22,7 @@
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/intel-family.h>
|
||||
|
||||
static void __init spectre_v2_select_mitigation(void);
|
||||
|
||||
@@ -154,6 +155,23 @@ disable:
|
||||
return SPECTRE_V2_CMD_NONE;
|
||||
}
|
||||
|
||||
/* Check for Skylake-like CPUs (for RSB handling) */
|
||||
static bool __init is_skylake_era(void)
|
||||
{
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
|
||||
boot_cpu_data.x86 == 6) {
|
||||
switch (boot_cpu_data.x86_model) {
|
||||
case INTEL_FAM6_SKYLAKE_MOBILE:
|
||||
case INTEL_FAM6_SKYLAKE_DESKTOP:
|
||||
case INTEL_FAM6_SKYLAKE_X:
|
||||
case INTEL_FAM6_KABYLAKE_MOBILE:
|
||||
case INTEL_FAM6_KABYLAKE_DESKTOP:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void __init spectre_v2_select_mitigation(void)
|
||||
{
|
||||
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
|
||||
@@ -212,6 +230,24 @@ retpoline_auto:
|
||||
|
||||
spectre_v2_enabled = mode;
|
||||
pr_info("%s\n", spectre_v2_strings[mode]);
|
||||
|
||||
/*
|
||||
* If neither SMEP or KPTI are available, there is a risk of
|
||||
* hitting userspace addresses in the RSB after a context switch
|
||||
* from a shallow call stack to a deeper one. To prevent this fill
|
||||
* the entire RSB, even when using IBRS.
|
||||
*
|
||||
* Skylake era CPUs have a separate issue with *underflow* of the
|
||||
* RSB, when they will predict 'ret' targets from the generic BTB.
|
||||
* The proper mitigation for this is IBRS. If IBRS is not supported
|
||||
* or deactivated in favour of retpolines the RSB fill on context
|
||||
* switch is required.
|
||||
*/
|
||||
if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
|
||||
!boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
|
||||
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
|
||||
pr_info("Filling RSB on context switch\n");
|
||||
}
|
||||
}
|
||||
|
||||
#undef pr_fmt
|
||||
|
||||
@@ -883,8 +883,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)

setup_force_cpu_cap(X86_FEATURE_ALWAYS);

/* Assume for now that ALL x86 CPUs are insecure */
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
if (c->x86_vendor != X86_VENDOR_AMD)
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

@@ -1754,6 +1754,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
|
||||
void (*machine_check_vector)(struct pt_regs *, long error_code) =
|
||||
unexpected_machine_check;
|
||||
|
||||
dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
|
||||
{
|
||||
machine_check_vector(regs, error_code);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called for each booted CPU to set up machine checks.
|
||||
* Must be called with preempt off:
|
||||
|
||||
@@ -31,7 +31,6 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
|
||||
const struct cpuid_bit *cb;
|
||||
|
||||
static const struct cpuid_bit cpuid_bits[] = {
|
||||
{ X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
|
||||
{ X86_FEATURE_AVX512_4VNNIW, CR_EDX, 2, 0x00000007, 0 },
|
||||
{ X86_FEATURE_AVX512_4FMAPS, CR_EDX, 3, 0x00000007, 0 },
|
||||
{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
|
||||
|
||||
@@ -37,6 +37,7 @@
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/insn.h>
|
||||
#include <asm/debugreg.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
@@ -192,7 +193,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src)
|
||||
}
|
||||
|
||||
/* Check whether insn is indirect jump */
|
||||
static int insn_is_indirect_jump(struct insn *insn)
|
||||
static int __insn_is_indirect_jump(struct insn *insn)
|
||||
{
|
||||
return ((insn->opcode.bytes[0] == 0xff &&
|
||||
(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
|
||||
@@ -226,6 +227,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
|
||||
return (start <= target && target <= start + len);
|
||||
}
|
||||
|
||||
static int insn_is_indirect_jump(struct insn *insn)
|
||||
{
|
||||
int ret = __insn_is_indirect_jump(insn);
|
||||
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
/*
|
||||
* Jump to x86_indirect_thunk_* is treated as an indirect jump.
|
||||
* Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
|
||||
* older gcc may use indirect jump. So we add this check instead of
|
||||
* replace indirect-jump check.
|
||||
*/
|
||||
if (!ret)
|
||||
ret = insn_jump_into_range(insn,
|
||||
(unsigned long)__indirect_thunk_start,
|
||||
(unsigned long)__indirect_thunk_end -
|
||||
(unsigned long)__indirect_thunk_start);
|
||||
#endif
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Decode whole function to ensure any instructions don't jump into target */
|
||||
static int can_optimize(unsigned long paddr)
|
||||
{
|
||||
|
||||
@@ -693,7 +693,6 @@ unsigned long native_calibrate_tsc(void)
|
||||
case INTEL_FAM6_KABYLAKE_DESKTOP:
|
||||
crystal_khz = 24000; /* 24.0 MHz */
|
||||
break;
|
||||
case INTEL_FAM6_SKYLAKE_X:
|
||||
case INTEL_FAM6_ATOM_DENVERTON:
|
||||
crystal_khz = 25000; /* 25.0 MHz */
|
||||
break;
|
||||
|
||||
@@ -105,6 +105,13 @@ SECTIONS
|
||||
SOFTIRQENTRY_TEXT
|
||||
*(.fixup)
|
||||
*(.gnu.warning)
|
||||
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
__indirect_thunk_start = .;
|
||||
*(.text.__x86.indirect_thunk)
|
||||
__indirect_thunk_end = .;
|
||||
#endif
|
||||
|
||||
/* End of text section */
|
||||
_etext = .;
|
||||
} :text = 0x9090
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
.macro THUNK reg
|
||||
.section .text.__x86.indirect_thunk.\reg
|
||||
.section .text.__x86.indirect_thunk
|
||||
|
||||
ENTRY(__x86_indirect_thunk_\reg)
|
||||
CFI_STARTPROC
|
||||
@@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
|
||||
* than one per register with the correct names. So we do it
|
||||
* the simple and nasty way...
|
||||
*/
|
||||
#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
|
||||
#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
|
||||
#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
|
||||
#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
|
||||
|
||||
GENERATE_THUNK(_ASM_AX)
|
||||
|
||||
@@ -191,14 +191,15 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
|
||||
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
|
||||
* faulted on a pte with its pkey=4.
|
||||
*/
|
||||
static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
|
||||
static void fill_sig_info_pkey(int si_signo, int si_code, siginfo_t *info,
|
||||
u32 *pkey)
|
||||
{
|
||||
/* This is effectively an #ifdef */
|
||||
if (!boot_cpu_has(X86_FEATURE_OSPKE))
|
||||
return;
|
||||
|
||||
/* Fault not from Protection Keys: nothing to do */
|
||||
if (si_code != SEGV_PKUERR)
|
||||
if ((si_code != SEGV_PKUERR) || (si_signo != SIGSEGV))
|
||||
return;
|
||||
/*
|
||||
* force_sig_info_fault() is called from a number of
|
||||
@@ -237,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
|
||||
lsb = PAGE_SHIFT;
|
||||
info.si_addr_lsb = lsb;
|
||||
|
||||
fill_sig_info_pkey(si_code, &info, pkey);
|
||||
fill_sig_info_pkey(si_signo, si_code, &info, pkey);
|
||||
|
||||
force_sig_info(si_signo, &info, tsk);
|
||||
}
|
||||
|
||||
@@ -4322,6 +4322,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
|
||||
*/
|
||||
{ "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
|
||||
{ "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
|
||||
|
||||
/* Devices we expect to fail diagnostics */
|
||||
|
||||
|
||||
@@ -747,6 +747,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
|
||||
{
|
||||
struct isert_conn *isert_conn = cma_id->qp->qp_context;
|
||||
|
||||
ib_drain_qp(isert_conn->qp);
|
||||
list_del_init(&isert_conn->node);
|
||||
isert_conn->cm_id = NULL;
|
||||
isert_put_conn(isert_conn);
|
||||
|
||||
@@ -178,12 +178,14 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
|
||||
twl4030_vibra_suspend, twl4030_vibra_resume);
|
||||
|
||||
static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
|
||||
struct device_node *node)
|
||||
struct device_node *parent)
|
||||
{
|
||||
struct device_node *node;
|
||||
|
||||
if (pdata && pdata->coexist)
|
||||
return true;
|
||||
|
||||
node = of_find_node_by_name(node, "codec");
|
||||
node = of_get_child_by_name(parent, "codec");
|
||||
if (node) {
|
||||
of_node_put(node);
|
||||
return true;
|
||||
|
||||
@@ -248,8 +248,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
|
||||
int vddvibr_uV = 0;
|
||||
int error;
|
||||
|
||||
of_node_get(twl6040_core_dev->of_node);
|
||||
twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
|
||||
twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
|
||||
"vibra");
|
||||
if (!twl6040_core_node) {
|
||||
dev_err(&pdev->dev, "parent of node is missing?\n");
|
||||
|
||||
@@ -1247,29 +1247,32 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
|
||||
case SS4_PACKET_ID_MULTI:
|
||||
if (priv->flags & ALPS_BUTTONPAD) {
|
||||
if (IS_SS4PLUS_DEV(priv->dev_id)) {
|
||||
f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
|
||||
f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
|
||||
f->mt[2].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
|
||||
f->mt[3].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
|
||||
no_data_x = SS4_PLUS_MFPACKET_NO_AX_BL;
|
||||
} else {
|
||||
f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
|
||||
f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
|
||||
no_data_x = SS4_MFPACKET_NO_AX_BL;
|
||||
}
|
||||
no_data_y = SS4_MFPACKET_NO_AY_BL;
|
||||
|
||||
f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
|
||||
f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
|
||||
no_data_x = SS4_MFPACKET_NO_AX_BL;
|
||||
no_data_y = SS4_MFPACKET_NO_AY_BL;
|
||||
} else {
|
||||
if (IS_SS4PLUS_DEV(priv->dev_id)) {
|
||||
f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
|
||||
f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
|
||||
f->mt[2].x = SS4_PLUS_STD_MF_X_V2(p, 0);
|
||||
f->mt[3].x = SS4_PLUS_STD_MF_X_V2(p, 1);
|
||||
no_data_x = SS4_PLUS_MFPACKET_NO_AX;
|
||||
} else {
|
||||
f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
|
||||
f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
|
||||
f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
|
||||
f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
|
||||
no_data_x = SS4_MFPACKET_NO_AX;
|
||||
}
|
||||
no_data_y = SS4_MFPACKET_NO_AY;
|
||||
|
||||
f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
|
||||
f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
|
||||
no_data_x = SS4_MFPACKET_NO_AX;
|
||||
no_data_y = SS4_MFPACKET_NO_AY;
|
||||
}
|
||||
|
||||
f->first_mp = 0;
|
||||
|
||||
@@ -120,10 +120,12 @@ enum SS4_PACKET_ID {
|
||||
#define SS4_IS_5F_DETECTED(_b) ((_b[2] & 0x10) == 0x10)
|
||||
|
||||
|
||||
#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
|
||||
#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
|
||||
#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coordinate value */
|
||||
#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coordinate value */
|
||||
#define SS4_MFPACKET_NO_AX 8160 /* X-Coordinate value */
|
||||
#define SS4_MFPACKET_NO_AY 4080 /* Y-Coordinate value */
|
||||
#define SS4_MFPACKET_NO_AX_BL 8176 /* Buttonless X-Coord value */
|
||||
#define SS4_MFPACKET_NO_AY_BL 4088 /* Buttonless Y-Coord value */
|
||||
#define SS4_PLUS_MFPACKET_NO_AX 4080 /* SS4 PLUS, X */
|
||||
#define SS4_PLUS_MFPACKET_NO_AX_BL 4088 /* Buttonless SS4 PLUS, X */
|
||||
|
||||
/*
|
||||
* enum V7_PACKET_ID - defines the packet type for V7
|
||||
|
||||
@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
|
||||
int data, n, ret;
|
||||
if (!np)
|
||||
return -ENODEV;
|
||||
np = of_find_node_by_name(np, "touch");
|
||||
np = of_get_child_by_name(np, "touch");
|
||||
if (!np) {
|
||||
dev_err(&pdev->dev, "Can't find touch node\n");
|
||||
return -EINVAL;
|
||||
@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
|
||||
if (data) {
|
||||
ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
|
||||
if (ret < 0)
|
||||
return -EINVAL;
|
||||
goto err_put_node;
|
||||
}
|
||||
/* set tsi prebias time */
|
||||
if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
|
||||
ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
|
||||
if (ret < 0)
|
||||
return -EINVAL;
|
||||
goto err_put_node;
|
||||
}
|
||||
/* set prebias & prechg time of pen detect */
|
||||
data = 0;
|
||||
@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
|
||||
if (data) {
|
||||
ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
|
||||
if (ret < 0)
|
||||
return -EINVAL;
|
||||
goto err_put_node;
|
||||
}
|
||||
of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
|
||||
|
||||
of_node_put(np);
|
||||
|
||||
return 0;
|
||||
|
||||
err_put_node:
|
||||
of_node_put(np);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
#else
|
||||
#define pm860x_touch_dt_init(x, y, z) (-1)
|
||||
|
||||
@@ -81,10 +81,14 @@
|
||||
#define SECTOR_TO_BLOCK_SHIFT 3
|
||||
|
||||
/*
|
||||
* For btree insert:
|
||||
* 3 for btree insert +
|
||||
* 2 for btree lookup used within space map
|
||||
* For btree remove:
|
||||
* 2 for shadow spine +
|
||||
* 4 for rebalance 3 child node
|
||||
*/
|
||||
#define THIN_MAX_CONCURRENT_LOCKS 5
|
||||
#define THIN_MAX_CONCURRENT_LOCKS 6
|
||||
|
||||
/* This should be plenty */
|
||||
#define SPACE_MAP_ROOT_SIZE 128
|
||||
|
||||
@@ -678,23 +678,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
|
||||
pn->keys[1] = rn->keys[0];
|
||||
memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
|
||||
|
||||
/*
|
||||
* rejig the spine. This is ugly, since it knows too
|
||||
* much about the spine
|
||||
*/
|
||||
if (s->nodes[0] != new_parent) {
|
||||
unlock_block(s->info, s->nodes[0]);
|
||||
s->nodes[0] = new_parent;
|
||||
}
|
||||
if (key < le64_to_cpu(rn->keys[0])) {
|
||||
unlock_block(s->info, right);
|
||||
s->nodes[1] = left;
|
||||
} else {
|
||||
unlock_block(s->info, left);
|
||||
s->nodes[1] = right;
|
||||
}
|
||||
s->count = 2;
|
||||
|
||||
unlock_block(s->info, left);
|
||||
unlock_block(s->info, right);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
|
||||
void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
|
||||
int err = 0;
|
||||
u8 *packet_ptr;
|
||||
int i, n = 1, packet_len;
|
||||
int packet_len;
|
||||
ptrdiff_t cmd_len;
|
||||
|
||||
/* usb device unregistered? */
|
||||
@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
|
||||
}
|
||||
|
||||
packet_ptr = cmd_head;
|
||||
packet_len = cmd_len;
|
||||
|
||||
/* firmware is not able to re-assemble 512 bytes buffer in full-speed */
|
||||
if ((dev->udev->speed != USB_SPEED_HIGH) &&
|
||||
(cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
|
||||
packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
|
||||
n += cmd_len / packet_len;
|
||||
} else {
|
||||
packet_len = cmd_len;
|
||||
}
|
||||
if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
|
||||
packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
do {
|
||||
err = usb_bulk_msg(dev->udev,
|
||||
usb_sndbulkpipe(dev->udev,
|
||||
PCAN_USBPRO_EP_CMDOUT),
|
||||
@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
|
||||
}
|
||||
|
||||
packet_ptr += packet_len;
|
||||
}
|
||||
cmd_len -= packet_len;
|
||||
|
||||
if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
|
||||
packet_len = cmd_len;
|
||||
|
||||
} while (packet_len > 0);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -183,13 +183,13 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int btt_log_read_pair(struct arena_info *arena, u32 lane,
|
||||
struct log_entry *ent)
|
||||
static int btt_log_group_read(struct arena_info *arena, u32 lane,
|
||||
struct log_group *log)
|
||||
{
|
||||
WARN_ON(!ent);
|
||||
WARN_ON(!log);
|
||||
return arena_read_bytes(arena,
|
||||
arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
|
||||
2 * LOG_ENT_SIZE);
|
||||
arena->logoff + (lane * LOG_GRP_SIZE), log,
|
||||
LOG_GRP_SIZE);
|
||||
}
|
||||
|
||||
static struct dentry *debugfs_root;
|
||||
@@ -229,6 +229,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
|
||||
debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
|
||||
debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
|
||||
debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
|
||||
debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
|
||||
debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
|
||||
}
|
||||
|
||||
static void btt_debugfs_init(struct btt *btt)
|
||||
@@ -247,6 +249,11 @@ static void btt_debugfs_init(struct btt *btt)
|
||||
}
|
||||
}
|
||||
|
||||
static u32 log_seq(struct log_group *log, int log_idx)
|
||||
{
|
||||
return le32_to_cpu(log->ent[log_idx].seq);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function accepts two log entries, and uses the
|
||||
* sequence number to find the 'older' entry.
|
||||
@@ -256,8 +263,10 @@ static void btt_debugfs_init(struct btt *btt)
|
||||
*
|
||||
* TODO The logic feels a bit kludge-y. make it better..
|
||||
*/
|
||||
static int btt_log_get_old(struct log_entry *ent)
|
||||
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
|
||||
{
|
||||
int idx0 = a->log_index[0];
|
||||
int idx1 = a->log_index[1];
|
||||
int old;
|
||||
|
||||
/*
|
||||
@@ -265,23 +274,23 @@ static int btt_log_get_old(struct log_entry *ent)
|
||||
* the next time, the following logic works out to put this
|
||||
* (next) entry into [1]
|
||||
*/
|
||||
if (ent[0].seq == 0) {
|
||||
ent[0].seq = cpu_to_le32(1);
|
||||
if (log_seq(log, idx0) == 0) {
|
||||
log->ent[idx0].seq = cpu_to_le32(1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ent[0].seq == ent[1].seq)
|
||||
if (log_seq(log, idx0) == log_seq(log, idx1))
|
||||
return -EINVAL;
|
||||
if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
|
||||
if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
|
||||
return -EINVAL;
|
||||
|
||||
if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
|
||||
if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
|
||||
if (log_seq(log, idx0) < log_seq(log, idx1)) {
|
||||
if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
|
||||
old = 0;
|
||||
else
|
||||
old = 1;
|
||||
} else {
|
||||
if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
|
||||
if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
|
||||
old = 1;
|
||||
else
|
||||
old = 0;
|
||||
@@ -306,17 +315,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
|
||||
{
|
||||
int ret;
|
||||
int old_ent, ret_ent;
|
||||
struct log_entry log[2];
|
||||
struct log_group log;
|
||||
|
||||
ret = btt_log_read_pair(arena, lane, log);
|
||||
ret = btt_log_group_read(arena, lane, &log);
|
||||
if (ret)
|
||||
return -EIO;
|
||||
|
||||
old_ent = btt_log_get_old(log);
|
||||
old_ent = btt_log_get_old(arena, &log);
|
||||
if (old_ent < 0 || old_ent > 1) {
|
||||
dev_info(to_dev(arena),
|
||||
"log corruption (%d): lane %d seq [%d, %d]\n",
|
||||
old_ent, lane, log[0].seq, log[1].seq);
|
||||
old_ent, lane, log.ent[arena->log_index[0]].seq,
|
||||
log.ent[arena->log_index[1]].seq);
|
||||
/* TODO set error state? */
|
||||
return -EIO;
|
||||
}
|
||||
@@ -324,7 +334,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
|
||||
ret_ent = (old_flag ? old_ent : (1 - old_ent));
|
||||
|
||||
if (ent != NULL)
|
||||
memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
|
||||
memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
|
||||
|
||||
return ret_ent;
|
||||
}
|
||||
@@ -338,17 +348,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
|
||||
u32 sub, struct log_entry *ent)
|
||||
{
|
||||
int ret;
|
||||
/*
|
||||
* Ignore the padding in log_entry for calculating log_half.
|
||||
* The entry is 'committed' when we write the sequence number,
|
||||
* and we want to ensure that that is the last thing written.
|
||||
* We don't bother writing the padding as that would be extra
|
||||
* media wear and write amplification
|
||||
*/
|
||||
unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
|
||||
u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
|
||||
u32 group_slot = arena->log_index[sub];
|
||||
unsigned int log_half = LOG_ENT_SIZE / 2;
|
||||
void *src = ent;
|
||||
u64 ns_off;
|
||||
|
||||
ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
|
||||
(group_slot * LOG_ENT_SIZE);
|
||||
/* split the 16B write into atomic, durable halves */
|
||||
ret = arena_write_bytes(arena, ns_off, src, log_half);
|
||||
if (ret)
|
||||
@@ -419,16 +425,16 @@ static int btt_log_init(struct arena_info *arena)
|
||||
{
|
||||
int ret;
|
||||
u32 i;
|
||||
struct log_entry log, zerolog;
|
||||
struct log_entry ent, zerolog;
|
||||
|
||||
memset(&zerolog, 0, sizeof(zerolog));
|
||||
|
||||
for (i = 0; i < arena->nfree; i++) {
|
||||
log.lba = cpu_to_le32(i);
|
||||
log.old_map = cpu_to_le32(arena->external_nlba + i);
|
||||
log.new_map = cpu_to_le32(arena->external_nlba + i);
|
||||
log.seq = cpu_to_le32(LOG_SEQ_INIT);
|
||||
ret = __btt_log_write(arena, i, 0, &log);
|
||||
ent.lba = cpu_to_le32(i);
|
||||
ent.old_map = cpu_to_le32(arena->external_nlba + i);
|
||||
ent.new_map = cpu_to_le32(arena->external_nlba + i);
|
||||
ent.seq = cpu_to_le32(LOG_SEQ_INIT);
|
||||
ret = __btt_log_write(arena, i, 0, &ent);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = __btt_log_write(arena, i, 1, &zerolog);
|
||||
@@ -490,6 +496,123 @@ static int btt_freelist_init(struct arena_info *arena)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool ent_is_padding(struct log_entry *ent)
|
||||
{
|
||||
return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
|
||||
&& (ent->seq == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Detecting valid log indices: We read a log group (see the comments in btt.h
|
||||
* for a description of a 'log_group' and its 'slots'), and iterate over its
|
||||
* four slots. We expect that a padding slot will be all-zeroes, and use this
|
||||
* to detect a padding slot vs. an actual entry.
|
||||
*
|
||||
* If a log_group is in the initial state, i.e. hasn't been used since the
|
||||
* creation of this BTT layout, it will have three of the four slots with
|
||||
* zeroes. We skip over these log_groups for the detection of log_index. If
|
||||
* all log_groups are in the initial state (i.e. the BTT has never been
|
||||
* written to), it is safe to assume the 'new format' of log entries in slots
|
||||
* (0, 1).
|
||||
*/
|
||||
static int log_set_indices(struct arena_info *arena)
|
||||
{
|
||||
bool idx_set = false, initial_state = true;
|
||||
int ret, log_index[2] = {-1, -1};
|
||||
u32 i, j, next_idx = 0;
|
||||
struct log_group log;
|
||||
u32 pad_count = 0;
|
||||
|
||||
for (i = 0; i < arena->nfree; i++) {
|
||||
ret = btt_log_group_read(arena, i, &log);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
for (j = 0; j < 4; j++) {
|
||||
if (!idx_set) {
|
||||
if (ent_is_padding(&log.ent[j])) {
|
||||
pad_count++;
|
||||
continue;
|
||||
} else {
|
||||
/* Skip if index has been recorded */
|
||||
if ((next_idx == 1) &&
|
||||
(j == log_index[0]))
|
||||
continue;
|
||||
/* valid entry, record index */
|
||||
log_index[next_idx] = j;
|
||||
next_idx++;
|
||||
}
|
||||
if (next_idx == 2) {
|
||||
/* two valid entries found */
|
||||
idx_set = true;
|
||||
} else if (next_idx > 2) {
|
||||
/* too many valid indices */
|
||||
return -ENXIO;
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* once the indices have been set, just verify
|
||||
* that all subsequent log groups are either in
|
||||
* their initial state or follow the same
|
||||
* indices.
|
||||
*/
|
||||
if (j == log_index[0]) {
|
||||
/* entry must be 'valid' */
|
||||
if (ent_is_padding(&log.ent[j]))
|
||||
return -ENXIO;
|
||||
} else if (j == log_index[1]) {
|
||||
;
|
||||
/*
|
||||
* log_index[1] can be padding if the
|
||||
* lane never got used and it is still
|
||||
* in the initial state (three 'padding'
|
||||
* entries)
|
||||
*/
|
||||
} else {
|
||||
/* entry must be invalid (padding) */
|
||||
if (!ent_is_padding(&log.ent[j]))
|
||||
return -ENXIO;
|
||||
}
|
||||
}
|
||||
}
|
||||
/*
|
||||
* If any of the log_groups have more than one valid,
|
||||
* non-padding entry, then the we are no longer in the
|
||||
* initial_state
|
||||
*/
|
||||
if (pad_count < 3)
|
||||
initial_state = false;
|
||||
pad_count = 0;
|
||||
}
|
||||
|
||||
if (!initial_state && !idx_set)
|
||||
return -ENXIO;
|
||||
|
||||
/*
|
||||
* If all the entries in the log were in the initial state,
|
||||
* assume new padding scheme
|
||||
*/
|
||||
if (initial_state)
|
||||
log_index[1] = 1;
|
||||
|
||||
/*
|
||||
* Only allow the known permutations of log/padding indices,
|
||||
* i.e. (0, 1), and (0, 2)
|
||||
*/
|
||||
if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
|
||||
; /* known index possibilities */
|
||||
else {
|
||||
dev_err(to_dev(arena), "Found an unknown padding scheme\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
arena->log_index[0] = log_index[0];
|
||||
arena->log_index[1] = log_index[1];
|
||||
dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
|
||||
dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int btt_rtt_init(struct arena_info *arena)
|
||||
{
|
||||
arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
|
||||
@@ -545,8 +668,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
|
||||
available -= 2 * BTT_PG_SIZE;
|
||||
|
||||
/* The log takes a fixed amount of space based on nfree */
|
||||
logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
|
||||
BTT_PG_SIZE);
|
||||
logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
|
||||
available -= logsize;
|
||||
|
||||
/* Calculate optimal split between map and data area */
|
||||
@@ -563,6 +685,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
|
||||
arena->mapoff = arena->dataoff + datasize;
|
||||
arena->logoff = arena->mapoff + mapsize;
|
||||
arena->info2off = arena->logoff + logsize;
|
||||
|
||||
/* Default log indices are (0,1) */
|
||||
arena->log_index[0] = 0;
|
||||
arena->log_index[1] = 1;
|
||||
return arena;
|
||||
}
|
||||
|
||||
@@ -653,6 +779,13 @@ static int discover_arenas(struct btt *btt)
|
||||
arena->external_lba_start = cur_nlba;
|
||||
parse_arena_meta(arena, super, cur_off);
|
||||
|
||||
ret = log_set_indices(arena);
|
||||
if (ret) {
|
||||
dev_err(to_dev(arena),
|
||||
"Unable to deduce log/padding indices\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = btt_freelist_init(arena);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
|
||||
#define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
|
||||
#define MAP_ENT_NORMAL 0xC0000000
|
||||
#define LOG_GRP_SIZE sizeof(struct log_group)
|
||||
#define LOG_ENT_SIZE sizeof(struct log_entry)
|
||||
#define ARENA_MIN_SIZE (1UL << 24) /* 16 MB */
|
||||
#define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */
|
||||
@@ -44,12 +45,52 @@ enum btt_init_state {
|
||||
INIT_READY
|
||||
};
|
||||
|
||||
/*
|
||||
* A log group represents one log 'lane', and consists of four log entries.
|
||||
* Two of the four entries are valid entries, and the remaining two are
|
||||
* padding. Due to an old bug in the padding location, we need to perform a
|
||||
* test to determine the padding scheme being used, and use that scheme
|
||||
* thereafter.
|
||||
*
|
||||
* In kernels prior to 4.15, 'log group' would have actual log entries at
|
||||
* indices (0, 2) and padding at indices (1, 3), where as the correct/updated
|
||||
* format has log entries at indices (0, 1) and padding at indices (2, 3).
|
||||
*
|
||||
* Old (pre 4.15) format:
|
||||
* +-----------------+-----------------+
|
||||
* | ent[0] | ent[1] |
|
||||
* | 16B | 16B |
|
||||
* | lba/old/new/seq | pad |
|
||||
* +-----------------------------------+
|
||||
* | ent[2] | ent[3] |
|
||||
* | 16B | 16B |
|
||||
* | lba/old/new/seq | pad |
|
||||
* +-----------------+-----------------+
|
||||
*
|
||||
* New format:
|
||||
* +-----------------+-----------------+
|
||||
* | ent[0] | ent[1] |
|
||||
* | 16B | 16B |
|
||||
* | lba/old/new/seq | lba/old/new/seq |
|
||||
* +-----------------------------------+
|
||||
* | ent[2] | ent[3] |
|
||||
* | 16B | 16B |
|
||||
* | pad | pad |
|
||||
* +-----------------+-----------------+
|
||||
*
|
||||
* We detect during start-up which format is in use, and set
|
||||
* arena->log_index[(0, 1)] with the detected format.
|
||||
*/
|
||||
|
||||
struct log_entry {
|
||||
__le32 lba;
|
||||
__le32 old_map;
|
||||
__le32 new_map;
|
||||
__le32 seq;
|
||||
__le64 padding[2];
|
||||
};
|
||||
|
||||
struct log_group {
|
||||
struct log_entry ent[4];
|
||||
};
|
||||
|
||||
struct btt_sb {
|
||||
@@ -117,6 +158,7 @@ struct aligned_lock {
|
||||
* @list: List head for list of arenas
|
||||
* @debugfs_dir: Debugfs dentry
|
||||
* @flags: Arena flags - may signify error states.
|
||||
* @log_index: Indices of the valid log entries in a log_group
|
||||
*
|
||||
* arena_info is a per-arena handle. Once an arena is narrowed down for an
|
||||
* IO, this struct is passed around for the duration of the IO.
|
||||
@@ -147,6 +189,7 @@ struct arena_info {
|
||||
struct dentry *debugfs_dir;
|
||||
/* Arena flags */
|
||||
u32 flags;
|
||||
int log_index[2];
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -395,6 +395,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index)
|
||||
if (ret)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
/* This phy type handled by the usb-phy subsystem for now */
|
||||
if (of_device_is_compatible(args.np, "usb-nop-xceiv"))
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
mutex_lock(&phy_provider_mutex);
|
||||
phy_provider = of_phy_provider_lookup(args.np);
|
||||
if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
|
||||
|
||||
@@ -3857,6 +3857,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
|
||||
if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
|
||||
hpsa_get_ioaccel_status(h, scsi3addr, this_device);
|
||||
volume_offline = hpsa_volume_offline(h, scsi3addr);
|
||||
this_device->volume_offline = volume_offline;
|
||||
if (volume_offline == HPSA_LV_FAILED) {
|
||||
rc = HPSA_LV_FAILED;
|
||||
dev_err(&h->pdev->dev,
|
||||
|
||||
@@ -149,7 +149,6 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
|
||||
struct list_head rq_list; /* head of request list */
|
||||
struct fasync_struct *async_qp; /* used by asynchronous notification */
|
||||
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
|
||||
char low_dma; /* as in parent but possibly overridden to 1 */
|
||||
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
|
||||
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
|
||||
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
|
||||
@@ -922,24 +921,14 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
|
||||
/* strange ..., for backward compatibility */
|
||||
return sfp->timeout_user;
|
||||
case SG_SET_FORCE_LOW_DMA:
|
||||
result = get_user(val, ip);
|
||||
if (result)
|
||||
return result;
|
||||
if (val) {
|
||||
sfp->low_dma = 1;
|
||||
if ((0 == sfp->low_dma) && !sfp->res_in_use) {
|
||||
val = (int) sfp->reserve.bufflen;
|
||||
sg_remove_scat(sfp, &sfp->reserve);
|
||||
sg_build_reserve(sfp, val);
|
||||
}
|
||||
} else {
|
||||
if (atomic_read(&sdp->detaching))
|
||||
return -ENODEV;
|
||||
sfp->low_dma = sdp->device->host->unchecked_isa_dma;
|
||||
}
|
||||
/*
|
||||
* N.B. This ioctl never worked properly, but failed to
|
||||
* return an error value. So returning '0' to keep compability
|
||||
* with legacy applications.
|
||||
*/
|
||||
return 0;
|
||||
case SG_GET_LOW_DMA:
|
||||
return put_user((int) sfp->low_dma, ip);
|
||||
return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
|
||||
case SG_GET_SCSI_ID:
|
||||
if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
|
||||
return -EFAULT;
|
||||
@@ -1860,6 +1849,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
|
||||
int sg_tablesize = sfp->parentdp->sg_tablesize;
|
||||
int blk_size = buff_size, order;
|
||||
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
|
||||
struct sg_device *sdp = sfp->parentdp;
|
||||
|
||||
if (blk_size < 0)
|
||||
return -EFAULT;
|
||||
@@ -1885,7 +1875,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
|
||||
scatter_elem_sz_prev = num;
|
||||
}
|
||||
|
||||
if (sfp->low_dma)
|
||||
if (sdp->device->host->unchecked_isa_dma)
|
||||
gfp_mask |= GFP_DMA;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
|
||||
@@ -2148,8 +2138,6 @@ sg_add_sfp(Sg_device * sdp)
|
||||
sfp->timeout = SG_DEFAULT_TIMEOUT;
|
||||
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
|
||||
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
|
||||
sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
|
||||
sdp->device->host->unchecked_isa_dma : 1;
|
||||
sfp->cmd_q = SG_DEF_COMMAND_Q;
|
||||
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
|
||||
sfp->parentdp = sdp;
|
||||
@@ -2608,7 +2596,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
|
||||
jiffies_to_msecs(fp->timeout),
|
||||
fp->reserve.bufflen,
|
||||
(int) fp->reserve.k_use_sg,
|
||||
(int) fp->low_dma);
|
||||
(int) sdp->device->host->unchecked_isa_dma);
|
||||
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
|
||||
(int) fp->cmd_q, (int) fp->force_packid,
|
||||
(int) fp->keep_orphan);
|
||||
|
||||
@@ -361,6 +361,7 @@ static void set_status_attr(int id)
|
||||
status->attr.attr.name = status->name;
|
||||
status->attr.attr.mode = S_IRUGO;
|
||||
status->attr.show = status_show;
|
||||
sysfs_attr_init(&status->attr.attr);
|
||||
}
|
||||
|
||||
static int init_status_attrs(void)
|
||||
|
||||
fs/pipe.c | 17
@@ -1018,13 +1018,19 @@ const struct file_operations pipefifo_fops = {
|
||||
|
||||
/*
|
||||
* Currently we rely on the pipe array holding a power-of-2 number
|
||||
* of pages.
|
||||
* of pages. Returns 0 on error.
|
||||
*/
|
||||
static inline unsigned int round_pipe_size(unsigned int size)
|
||||
{
|
||||
unsigned long nr_pages;
|
||||
|
||||
if (size < pipe_min_size)
|
||||
size = pipe_min_size;
|
||||
|
||||
nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
if (nr_pages == 0)
|
||||
return 0;
|
||||
|
||||
return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
@@ -1040,6 +1046,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
|
||||
long ret = 0;
|
||||
|
||||
size = round_pipe_size(arg);
|
||||
if (size == 0)
|
||||
return -EINVAL;
|
||||
nr_pages = size >> PAGE_SHIFT;
|
||||
|
||||
if (!nr_pages)
|
||||
@@ -1123,13 +1131,18 @@ out_revert_acct:
|
||||
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
|
||||
size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
unsigned int rounded_pipe_max_size;
|
||||
int ret;
|
||||
|
||||
ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
|
||||
if (ret < 0 || !write)
|
||||
return ret;
|
||||
|
||||
pipe_max_size = round_pipe_size(pipe_max_size);
|
||||
rounded_pipe_max_size = round_pipe_size(pipe_max_size);
|
||||
if (rounded_pipe_max_size == 0)
|
||||
return -EINVAL;
|
||||
|
||||
pipe_max_size = rounded_pipe_max_size;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@@ -423,8 +423,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
|
||||
* safe because the task has stopped executing permanently.
|
||||
*/
|
||||
if (permitted && (task->flags & PF_DUMPCORE)) {
|
||||
eip = KSTK_EIP(task);
|
||||
esp = KSTK_ESP(task);
|
||||
if (try_get_task_stack(task)) {
|
||||
eip = KSTK_EIP(task);
|
||||
esp = KSTK_ESP(task);
|
||||
put_task_stack(task);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -24,10 +24,16 @@
|
||||
#ifndef MODULE_ARCH_VERMAGIC
|
||||
#define MODULE_ARCH_VERMAGIC ""
|
||||
#endif
|
||||
#ifdef RETPOLINE
|
||||
#define MODULE_VERMAGIC_RETPOLINE "retpoline "
|
||||
#else
|
||||
#define MODULE_VERMAGIC_RETPOLINE ""
|
||||
#endif
|
||||
|
||||
#define VERMAGIC_STRING \
|
||||
UTS_RELEASE " " \
|
||||
MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
|
||||
MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
|
||||
MODULE_ARCH_VERMAGIC
|
||||
MODULE_ARCH_VERMAGIC \
|
||||
MODULE_VERMAGIC_RETPOLINE
|
||||
|
||||
|
||||
@@ -197,7 +197,6 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
|
||||
#define SG_DEFAULT_RETRIES 0
|
||||
|
||||
/* Defaults, commented if they differ from original sg driver */
|
||||
#define SG_DEF_FORCE_LOW_DMA 0 /* was 1 -> memory below 16MB on i386 */
|
||||
#define SG_DEF_FORCE_PACK_ID 0
|
||||
#define SG_DEF_KEEP_ORPHAN 0
|
||||
#define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */
|
||||
|
||||
@@ -1711,6 +1711,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
|
||||
struct futex_q *this, *next;
|
||||
WAKE_Q(wake_q);
|
||||
|
||||
if (nr_wake < 0 || nr_requeue < 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (requeue_pi) {
|
||||
/*
|
||||
* Requeue PI only works on two distinct uaddrs. This
|
||||
|
||||
@@ -723,6 +723,8 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
|
||||
if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
|
||||
return;
|
||||
dl_se->dl_throttled = 1;
|
||||
if (dl_se->runtime > 0)
|
||||
dl_se->runtime = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1696,7 +1696,7 @@ void run_local_timers(void)
|
||||
hrtimer_run_queues();
|
||||
/* Raise the softirq only if required. */
|
||||
if (time_before(jiffies, base->clk)) {
|
||||
if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
|
||||
if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
|
||||
return;
|
||||
/* CPU is awake, so check the deferrable base. */
|
||||
base++;
|
||||
|
||||
@@ -2200,6 +2200,7 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
|
||||
{
|
||||
struct trace_event_call *call, *p;
|
||||
const char *last_system = NULL;
|
||||
bool first = false;
|
||||
int last_i;
|
||||
int i;
|
||||
|
||||
@@ -2207,15 +2208,28 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
|
||||
list_for_each_entry_safe(call, p, &ftrace_events, list) {
|
||||
/* events are usually grouped together with systems */
|
||||
if (!last_system || call->class->system != last_system) {
|
||||
first = true;
|
||||
last_i = 0;
|
||||
last_system = call->class->system;
|
||||
}
|
||||
|
||||
/*
|
||||
* Since calls are grouped by systems, the likelyhood that the
|
||||
* next call in the iteration belongs to the same system as the
|
||||
* previous call is high. As an optimization, we skip seaching
|
||||
* for a map[] that matches the call's system if the last call
|
||||
* was from the same system. That's what last_i is for. If the
|
||||
* call has the same system as the previous call, then last_i
|
||||
* will be the index of the first map[] that has a matching
|
||||
* system.
|
||||
*/
|
||||
for (i = last_i; i < len; i++) {
|
||||
if (call->class->system == map[i]->system) {
|
||||
/* Save the first system if need be */
|
||||
if (!last_i)
|
||||
if (first) {
|
||||
last_i = i;
|
||||
first = false;
|
||||
}
|
||||
update_event_printk(call, map[i]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,6 +48,7 @@
|
||||
#include <linux/nodemask.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/nmi.h>
|
||||
|
||||
#include "workqueue_internal.h"
|
||||
|
||||
@@ -4424,6 +4425,12 @@ void show_workqueue_state(void)
|
||||
if (pwq->nr_active || !list_empty(&pwq->delayed_works))
|
||||
show_pwq(pwq);
|
||||
spin_unlock_irqrestore(&pwq->pool->lock, flags);
|
||||
/*
|
||||
* We could be printing a lot from atomic context, e.g.
|
||||
* sysrq-t -> show_workqueue_state(). Avoid triggering
|
||||
* hard lockup.
|
||||
*/
|
||||
touch_nmi_watchdog();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4451,6 +4458,12 @@ void show_workqueue_state(void)
|
||||
pr_cont("\n");
|
||||
next_pool:
|
||||
spin_unlock_irqrestore(&pool->lock, flags);
|
||||
/*
|
||||
* We could be printing a lot from atomic context, e.g.
|
||||
* sysrq-t -> show_workqueue_state(). Avoid triggering
|
||||
* hard lockup.
|
||||
*/
|
||||
touch_nmi_watchdog();
|
||||
}
|
||||
|
||||
rcu_read_unlock_sched();
|
||||
|
||||
@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
|
||||
#endif
|
||||
int len;
|
||||
|
||||
if (sp->sadb_address_len <
|
||||
DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
|
||||
sizeof(uint64_t)))
|
||||
return -EINVAL;
|
||||
|
||||
switch (addr->sa_family) {
|
||||
case AF_INET:
|
||||
len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
|
||||
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
|
||||
uint16_t ext_type;
|
||||
int ext_len;
|
||||
|
||||
if (len < sizeof(*ehdr))
|
||||
return -EINVAL;
|
||||
|
||||
ext_len = ehdr->sadb_ext_len;
|
||||
ext_len *= sizeof(uint64_t);
|
||||
ext_type = ehdr->sadb_ext_type;
|
||||
|
||||
@@ -96,6 +96,8 @@ def get_thread_info(task):
|
||||
thread_info_addr = task.address + ia64_task_size
|
||||
thread_info = thread_info_addr.cast(thread_info_ptr_type)
|
||||
else:
|
||||
if task.type.fields()[0].type == thread_info_type.get_type():
|
||||
return task['thread_info']
|
||||
thread_info = task['stack'].cast(thread_info_ptr_type)
|
||||
return thread_info.dereference()
|
||||
|
||||
|
||||
@@ -578,7 +578,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
|
||||
{
|
||||
u_int64_t n = (u_int64_t) a * b;
|
||||
if (c == 0) {
|
||||
snd_BUG_ON(!n);
|
||||
*r = 0;
|
||||
return UINT_MAX;
|
||||
}
|
||||
|
||||
@@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
|
||||
rwlock_init(&client->ports_lock);
|
||||
mutex_init(&client->ports_mutex);
|
||||
INIT_LIST_HEAD(&client->ports_list_head);
|
||||
mutex_init(&client->ioctl_mutex);
|
||||
|
||||
/* find free slot in the client table */
|
||||
spin_lock_irqsave(&clients_lock, flags);
|
||||
@@ -2127,7 +2128,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd,
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
mutex_lock(&client->ioctl_mutex);
|
||||
err = handler->func(client, &buf);
|
||||
mutex_unlock(&client->ioctl_mutex);
|
||||
if (err >= 0) {
|
||||
/* Some commands includes a bug in 'dir' field. */
|
||||
if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
|
||||
|
||||
@@ -61,6 +61,7 @@ struct snd_seq_client {
|
||||
struct list_head ports_list_head;
|
||||
rwlock_t ports_lock;
|
||||
struct mutex ports_mutex;
|
||||
struct mutex ioctl_mutex;
|
||||
int convert32; /* convert 32->64bit */
|
||||
|
||||
/* output pool */
|
||||
|
||||
@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
|
||||
/*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
|
||||
|
||||
/* codec SSID */
|
||||
SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
|
||||
SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
|
||||
SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
|
||||
SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
|
||||
|
||||
@@ -5617,6 +5617,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
|
||||
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
|
||||
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
|
||||
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
|
||||
SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
|
||||
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
|
||||
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
|
||||
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
|
||||
|
||||
@@ -26,6 +26,7 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include "elf.h"
|
||||
#include "warn.h"
|
||||
@@ -370,7 +371,8 @@ struct elf *elf_open(const char *name)
|
||||
|
||||
elf->fd = open(name, O_RDONLY);
|
||||
if (elf->fd == -1) {
|
||||
perror("open");
|
||||
fprintf(stderr, "objtool: Can't open '%s': %s\n",
|
||||
name, strerror(errno));
|
||||
goto err;
|
||||
}

@@ -19,18 +19,18 @@ CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)

include $(srctree)/tools/scripts/Makefile.arch

$(call detected_var,ARCH)
$(call detected_var,SRCARCH)

NO_PERF_REGS := 1

# Additional ARCH settings for ppc
ifeq ($(ARCH),powerpc)
ifeq ($(SRCARCH),powerpc)
NO_PERF_REGS := 0
LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
endif

# Additional ARCH settings for x86
ifeq ($(ARCH),x86)
ifeq ($(SRCARCH),x86)
$(call detected,CONFIG_X86)
ifeq (${IS_64_BIT}, 1)
CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
@@ -43,12 +43,12 @@ ifeq ($(ARCH),x86)
NO_PERF_REGS := 0
endif

ifeq ($(ARCH),arm)
ifeq ($(SRCARCH),arm)
NO_PERF_REGS := 0
LIBUNWIND_LIBS = -lunwind -lunwind-arm
endif

ifeq ($(ARCH),arm64)
ifeq ($(SRCARCH),arm64)
NO_PERF_REGS := 0
LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
endif
@@ -61,7 +61,7 @@ endif
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
# to the check.
ifneq ($(ARCH),$(filter $(ARCH),x86 arm))
ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm))
NO_LIBDW_DWARF_UNWIND := 1
endif

@@ -115,9 +115,9 @@ endif
FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf

FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
# include ARCH specific config
-include $(src-perf)/arch/$(ARCH)/Makefile
-include $(src-perf)/arch/$(SRCARCH)/Makefile

ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
@@ -205,12 +205,12 @@ ifeq ($(DEBUG),0)
endif

CFLAGS += -I$(src-perf)/util/include
CFLAGS += -I$(src-perf)/arch/$(ARCH)/include
CFLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
CFLAGS += -I$(srctree)/tools/include/uapi
CFLAGS += -I$(srctree)/tools/include/
CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi
CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/
CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/
CFLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi
CFLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/
CFLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/

# $(obj-perf) for generated common-cmds.h
# $(obj-perf)/util for generated bison/flex headers
@@ -321,7 +321,7 @@ ifndef NO_LIBELF

ifndef NO_DWARF
ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
NO_DWARF := 1
else
CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
@@ -346,7 +346,7 @@ ifndef NO_LIBELF
CFLAGS += -DHAVE_BPF_PROLOGUE
$(call detected,CONFIG_BPF_PROLOGUE)
else
msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset());
msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset());
endif
else
msg := $(warning DWARF support is off, BPF prologue is disabled);
@@ -372,7 +372,7 @@ ifdef PERF_HAVE_JITDUMP
endif
endif

ifeq ($(ARCH),powerpc)
ifeq ($(SRCARCH),powerpc)
ifndef NO_DWARF
CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
endif
@@ -453,7 +453,7 @@ else
endif

ifndef NO_LOCAL_LIBUNWIND
ifeq ($(ARCH),$(filter $(ARCH),arm arm64))
ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
$(call feature_check,libunwind-debug-frame)
ifneq ($(feature-libunwind-debug-frame), 1)
msg := $(warning No debug_frame support found in libunwind);
@@ -717,7 +717,7 @@ ifeq (${IS_64_BIT}, 1)
NO_PERF_READ_VDSO32 := 1
endif
endif
ifneq ($(ARCH), x86)
ifneq ($(SRCARCH), x86)
NO_PERF_READ_VDSOX32 := 1
endif
ifndef NO_PERF_READ_VDSOX32
@@ -746,7 +746,7 @@ ifdef LIBBABELTRACE
endif

ifndef NO_AUXTRACE
ifeq ($(ARCH),x86)
ifeq ($(SRCARCH),x86)
ifeq ($(feature-get_cpuid), 0)
msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
NO_AUXTRACE := 1
@@ -793,7 +793,7 @@ sysconfdir = $(prefix)/etc
ETC_PERFCONFIG = etc/perfconfig
endif
ifndef lib
ifeq ($(ARCH)$(IS_64_BIT), x861)
ifeq ($(SRCARCH)$(IS_64_BIT), x861)
lib = lib64
else
lib = lib

@@ -192,7 +192,7 @@ endif

ifeq ($(config),0)
include $(srctree)/tools/scripts/Makefile.arch
-include arch/$(ARCH)/Makefile
-include arch/$(SRCARCH)/Makefile
endif

# The FEATURE_DUMP_EXPORT holds location of the actual

@@ -1,2 +1,2 @@
libperf-y += common.o
libperf-y += $(ARCH)/
libperf-y += $(SRCARCH)/

@@ -2,7 +2,7 @@ hostprogs := jevents

jevents-y += json.o jsmn.o jevents.o
pmu-events-y += pmu-events.o
JDIR = pmu-events/arch/$(ARCH)
JDIR = pmu-events/arch/$(SRCARCH)
JSON = $(shell [ -d $(JDIR) ] && \
find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
#
@@ -10,4 +10,4 @@ JSON = $(shell [ -d $(JDIR) ] && \
# directory and create tables in pmu-events.c.
#
$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
$(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
$(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)

@@ -71,7 +71,7 @@ $(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/B
$(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
$(Q)echo ';' >> $@

ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc))
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
endif

@@ -826,7 +826,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused,

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(ARCH)/util/header.c
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{