Merge 4.9.245 into android-4.9-q
Changes in 4.9.245:
    powerpc/64s: Define MASKABLE_RELON_EXCEPTION_PSERIES_OOL
    powerpc/64s: move some exception handlers out of line
    powerpc/64s: flush L1D on kernel entry
    powerpc: Add a framework for user access tracking
    powerpc: Implement user_access_begin and friends
    powerpc: Fix __clear_user() with KUAP enabled
    powerpc/uaccess: Evaluate macro arguments once, before user access is allowed
    powerpc/64s: flush L1D after user accesses
    i2c: imx: use clk notifier for rate changes
    i2c: imx: Fix external abort on interrupt in exit paths
    i2c: mux: pca954x: Add missing pca9546 definition to chip_desc
    powerpc/8xx: Always fault when _PAGE_ACCESSED is not set
    Input: sunkbd - avoid use-after-free in teardown paths
    mac80211: always wind down STA state
    KVM: x86: clflushopt should be treated as a no-op by emulation
    ACPI: GED: fix -Wformat
    Linux 4.9.245

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I688b066e99eeb16270414e0c4cb4dc3bb244486c
@@ -2539,6 +2539,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 				mds=off [X86]
 				tsx_async_abort=off [X86]
 				kvm.nx_huge_pages=off [X86]
+				no_entry_flush [PPC]
+				no_uaccess_flush [PPC]

 				Exceptions:
 				       This does not have any effect on
@@ -2845,6 +2847,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.

 	noefi		Disable EFI runtime services support.

+	no_entry_flush	[PPC] Don't flush the L1-D cache when entering the kernel.
+
 	noexec		[IA-64]

 	noexec		[X86]
@@ -2894,6 +2898,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	nospec_store_bypass_disable
 			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability

+	no_uaccess_flush
+			[PPC] Don't flush the L1-D cache after accessing user data.
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.
@@ -5044,6 +5051,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Disables the PV optimizations forcing the HVM guest to
 			run as generic HVM guest with no PV drivers.

+	xen.event_eoi_delay=	[XEN]
+			How long to delay EOI handling in case of event
+			storms (jiffies). Default is 10.
+
+	xen.event_loop_timeout=	[XEN]
+			After which time (jiffies) the event handling loop
+			should start to delay EOI handling. Default is 2.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
Makefile (2 lines changed)
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 243
+SUBLEVEL = 245
 EXTRAVERSION =
 NAME = Roaring Lionus
arch/powerpc/include/asm/book3s/64/kup-radix.h (new file, 22 lines)
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
+#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
+
+DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
+
+/* Prototype for function defined in exceptions-64s.S */
+void do_uaccess_flush(void);
+
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+					      unsigned long size)
+{
+}
+
+static inline void prevent_user_access(void __user *to, const void __user *from,
+				       unsigned long size)
+{
+	if (static_branch_unlikely(&uaccess_flush_key))
+		do_uaccess_flush();
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
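The header above gates the uaccess flush behind a static key, so the common case (mitigation disabled) costs only a patched nop at the call site. A minimal sketch of the same static-key pattern, with hypothetical names that are not part of the patch:

    /* Sketch only: illustrates the jump-label pattern used above.
     * "demo_key" and "demo_exit_user_access" are hypothetical names. */
    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(demo_key);

    static inline void demo_exit_user_access(void)
    {
    	/* Compiles to a nop until static_branch_enable(&demo_key)
    	 * patches the branch in at runtime. */
    	if (static_branch_unlikely(&demo_key))
    		do_uaccess_flush();	/* slow path, only when enabled */
    }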
@@ -66,11 +66,18 @@
 	nop;								\
 	nop

+#define ENTRY_FLUSH_SLOT						\
+	ENTRY_FLUSH_FIXUP_SECTION;					\
+	nop;								\
+	nop;								\
+	nop;
+
 /*
  * r10 must be free to use, r13 must be paca
  */
 #define INTERRUPT_TO_KERNEL						\
-	STF_ENTRY_BARRIER_SLOT
+	STF_ENTRY_BARRIER_SLOT;						\
+	ENTRY_FLUSH_SLOT

 /*
  * Macros for annotating the expected destination of (h)rfid
@@ -563,6 +570,10 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec);		\
 	EXCEPTION_PROLOG_PSERIES_1(label, EXC_HV)

+#define MASKABLE_RELON_EXCEPTION_PSERIES_OOL(vec, label)		\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_PR, vec);		\
+	EXCEPTION_PROLOG_PSERIES_1(label, EXC_STD)
+
 /*
  * Our exception common code can be passed various "additions"
  * to specify the behaviour of interrupts, whether to kick the
@@ -205,6 +205,22 @@ void setup_feature_keys(void);
 	FTR_ENTRY_OFFSET 955b-956b;			\
 	.popsection;

+#define UACCESS_FLUSH_FIXUP_SECTION			\
+959:							\
+	.pushsection __uaccess_flush_fixup,"a";		\
+	.align 2;					\
+960:							\
+	FTR_ENTRY_OFFSET 959b-960b;			\
+	.popsection;
+
+#define ENTRY_FLUSH_FIXUP_SECTION			\
+957:							\
+	.pushsection __entry_flush_fixup,"a";		\
+	.align 2;					\
+958:							\
+	FTR_ENTRY_OFFSET 957b-958b;			\
+	.popsection;
+
 #define RFI_FLUSH_FIXUP_SECTION				\
 951:							\
 	.pushsection __rfi_flush_fixup,"a";		\
@@ -236,8 +252,11 @@ void setup_feature_keys(void);
 #ifndef __ASSEMBLY__

 extern long stf_barrier_fallback;
+extern long entry_flush_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
+extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
 extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
@@ -36,6 +36,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 {
 	int oldval = 0, ret;

+	allow_write_to_user(uaddr, sizeof(*uaddr));
 	pagefault_disable();

 	switch (op) {
@@ -62,6 +63,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,

 	*oval = oldval;

+	prevent_write_to_user(uaddr, sizeof(*uaddr));
 	return ret;
 }

@@ -75,6 +77,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;

+	allow_write_to_user(uaddr, sizeof(*uaddr));
 	__asm__ __volatile__ (
         PPC_ATOMIC_ENTRY_BARRIER
"1:     lwarx   %1,0,%3         # futex_atomic_cmpxchg_inatomic\n\
@@ -97,6 +100,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
         : "cc", "memory");

 	*uval = prev;
+	prevent_write_to_user(uaddr, sizeof(*uaddr));
 	return ret;
 }
arch/powerpc/include/asm/kup.h (new file, 40 lines)
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KUP_H_
+#define _ASM_POWERPC_KUP_H_
+
+#ifndef __ASSEMBLY__
+
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/kup-radix.h>
+#else
+static inline void allow_user_access(void __user *to, const void __user *from,
+				     unsigned long size) { }
+static inline void prevent_user_access(void __user *to, const void __user *from,
+				       unsigned long size) { }
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+static inline void allow_read_from_user(const void __user *from, unsigned long size)
+{
+	allow_user_access(NULL, from, size);
+}
+
+static inline void allow_write_to_user(void __user *to, unsigned long size)
+{
+	allow_user_access(to, NULL, size);
+}
+
+static inline void prevent_read_from_user(const void __user *from, unsigned long size)
+{
+	prevent_user_access(NULL, from, size);
+}
+
+static inline void prevent_write_to_user(void __user *to, unsigned long size)
+{
+	prevent_user_access(to, NULL, size);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_KUP_H_ */
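Every helper above narrows a user access to the smallest possible window: open with an allow_* call, perform the access, close with the matching prevent_* call. A sketch of that bracketing (hypothetical function name; this is the same shape copy_from_user() takes later in this series):

    /* Sketch, not from the patch: canonical allow/prevent bracketing. */
    static unsigned long demo_copy_from_user(void *to, const void __user *from,
    					     unsigned long n)
    {
    	unsigned long left;

    	allow_read_from_user(from, n);		/* open the access window */
    	left = __copy_tofrom_user((__force void __user *)to, from, n);
    	prevent_read_from_user(from, n);	/* close it; may flush L1D */
    	return left;
    }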
@@ -84,12 +84,19 @@ static inline bool security_ftr_enabled(unsigned long feature)
 // Software required to flush link stack on context switch
 #define SEC_FTR_FLUSH_LINK_STACK	0x0000000000001000ull

+// The L1-D cache should be flushed when entering the kernel
+#define SEC_FTR_L1D_FLUSH_ENTRY		0x0000000000004000ull
+
+// The L1-D cache should be flushed after user accesses from the kernel
+#define SEC_FTR_L1D_FLUSH_UACCESS	0x0000000000008000ull

 // Features enabled by default
 #define SEC_FTR_DEFAULT \
	(SEC_FTR_L1D_FLUSH_HV | \
	 SEC_FTR_L1D_FLUSH_PR | \
	 SEC_FTR_BNDS_CHK_SPEC_BAR | \
+	 SEC_FTR_L1D_FLUSH_ENTRY | \
+	 SEC_FTR_L1D_FLUSH_UACCESS | \
	 SEC_FTR_FAVOUR_SECURITY)

 #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
@@ -50,12 +50,16 @@ enum l1d_flush_type {
 };

 void setup_rfi_flush(enum l1d_flush_type, bool enable);
+void setup_entry_flush(bool enable);
+void setup_uaccess_flush(bool enable);
 void do_rfi_flush_fixups(enum l1d_flush_type types);
 #ifdef CONFIG_PPC_BARRIER_NOSPEC
 void setup_barrier_nospec(void);
 #else
 static inline void setup_barrier_nospec(void) { };
 #endif
+void do_uaccess_flush_fixups(enum l1d_flush_type types);
+void do_entry_flush_fixups(enum l1d_flush_type types);
 void do_barrier_nospec_fixups(bool enable);
 extern bool barrier_nospec_enabled;
@@ -9,6 +9,7 @@
 #include <asm/asm-compat.h>
 #include <asm/processor.h>
 #include <asm/page.h>
+#include <asm/kup.h>

 #define VERIFY_READ	0
 #define VERIFY_WRITE	1
@@ -105,9 +106,14 @@ struct exception_table_entry {
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

 #define __get_user(x, ptr) \
-	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
 #define __put_user(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+
+#define __get_user_allowed(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
+#define __put_user_allowed(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)

 #define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
@@ -161,7 +167,7 @@ extern long __put_user_bad(void);
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
 #endif /* __powerpc64__ */

-#define __put_user_size(x, ptr, size, retval)			\
+#define __put_user_size_allowed(x, ptr, size, retval)		\
 do {								\
	retval = 0;						\
	switch (size) {						\
@@ -173,14 +179,28 @@ do { \
	}							\
 } while (0)

-#define __put_user_nocheck(x, ptr, size)			\
+#define __put_user_size(x, ptr, size, retval)			\
+do {								\
+	allow_write_to_user(ptr, size);				\
+	__put_user_size_allowed(x, ptr, size, retval);		\
+	prevent_write_to_user(ptr, size);			\
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size, do_allow)		\
 ({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
-	__chk_user_ptr(ptr);					\
-	__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__chk_user_ptr(__pu_addr);				\
+	if (do_allow)						\
+		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+	else							\
+		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
+								\
	__pu_err;						\
 })

@@ -188,9 +208,13 @@ do { \
 ({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
	might_fault();						\
-	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
-		__put_user_size((x), __pu_addr, (size), __pu_err); \
+	if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size))	\
+		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+								\
	__pu_err;						\
 })

@@ -198,8 +222,12 @@ do { \
 ({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
-	__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
+	__chk_user_ptr(__pu_addr);				\
+	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+								\
	__pu_err;						\
 })

@@ -246,7 +274,7 @@ extern long __get_user_bad(void);
		: "b" (addr), "i" (-EFAULT), "0" (err))
 #endif /* __powerpc64__ */

-#define __get_user_size(x, ptr, size, retval)			\
+#define __get_user_size_allowed(x, ptr, size, retval)		\
 do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
@@ -261,17 +289,30 @@ do { \
	}							\
 } while (0)

-#define __get_user_nocheck(x, ptr, size)			\
+#define __get_user_size(x, ptr, size, retval)			\
+do {								\
+	allow_read_from_user(ptr, size);			\
+	__get_user_size_allowed(x, ptr, size, retval);		\
+	prevent_read_from_user(ptr, size);			\
+} while (0)
+
+#define __get_user_nocheck(x, ptr, size, do_allow)		\
 ({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
+	__typeof__(size) __gu_size = (size);			\
+								\
+	__chk_user_ptr(__gu_addr);				\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	barrier_nospec();					\
-	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	if (do_allow)						\
+		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
+	else							\
+		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
+								\
	__gu_err;						\
 })

@@ -280,12 +321,15 @@ do { \
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
+	__typeof__(size) __gu_size = (size);			\
+								\
	might_fault();						\
-	if (access_ok(VERIFY_READ, __gu_addr, (size))) {	\
+	if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) {	\
		barrier_nospec();				\
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	}							\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+								\
	__gu_err;						\
 })

@@ -294,10 +338,13 @@ do { \
	long __gu_err;						\
	unsigned long __gu_val;					\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
+	__typeof__(size) __gu_size = (size);			\
+								\
+	__chk_user_ptr(__gu_addr);				\
	barrier_nospec();					\
-	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+								\
	__gu_err;						\
 })

@@ -312,9 +359,14 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
 {
+	unsigned long ret;
+
	if (likely(access_ok(VERIFY_READ, from, n))) {
		check_object_size(to, n, false);
-		return __copy_tofrom_user((__force void __user *)to, from, n);
+		allow_user_access(to, from, n);
+		ret = __copy_tofrom_user((__force void __user *)to, from, n);
+		prevent_user_access(to, from, n);
+		return ret;
	}
	memset(to, 0, n);
	return n;
@@ -347,8 +399,9 @@ extern unsigned long copy_in_user(void __user *to, const void __user *from,
 static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
 {
+	unsigned long ret;
	if (__builtin_constant_p(n) && (n <= 8)) {
-		unsigned long ret = 1;
+		ret = 1;

		switch (n) {
		case 1:
@@ -375,27 +428,32 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
	check_object_size(to, n, false);

	barrier_nospec();
-	return __copy_tofrom_user((__force void __user *)to, from, n);
+	allow_read_from_user(from, n);
+	ret = __copy_tofrom_user((__force void __user *)to, from, n);
+	prevent_read_from_user(from, n);
+	return ret;
 }

 static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
 {
+	unsigned long ret;
+
	if (__builtin_constant_p(n) && (n <= 8)) {
-		unsigned long ret = 1;
+		ret = 1;

		switch (n) {
		case 1:
-			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
-			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
-			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
-			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
@@ -403,8 +461,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
	}

	check_object_size(from, n, true);

-	return __copy_tofrom_user(to, (__force const void __user *)from, n);
+	allow_write_to_user(to, n);
+	ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
+	prevent_write_to_user(to, n);
+	return ret;
 }

 static inline unsigned long __copy_from_user(void *to,
@@ -421,20 +481,39 @@ static inline unsigned long __copy_to_user(void __user *to,
	return __copy_to_user_inatomic(to, from, size);
 }

-extern unsigned long __clear_user(void __user *addr, unsigned long size);
+unsigned long __arch_clear_user(void __user *addr, unsigned long size);

 static inline unsigned long clear_user(void __user *addr, unsigned long size)
 {
+	unsigned long ret = size;
	might_fault();
-	if (likely(access_ok(VERIFY_WRITE, addr, size)))
-		return __clear_user(addr, size);
-	return size;
+	if (likely(access_ok(VERIFY_WRITE, addr, size))) {
+		allow_write_to_user(addr, size);
+		ret = __arch_clear_user(addr, size);
+		prevent_write_to_user(addr, size);
+	}
+	return ret;
+}
+
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+{
+	return clear_user(addr, size);
 }

 extern long strncpy_from_user(char *dst, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);

+#define user_access_begin()	do { } while (0)
+#define user_access_end()	prevent_user_access(NULL, NULL, ~0ul)
+
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_copy_to_user(d, s, l, e) \
+	unsafe_op_wrap(__copy_to_user_inatomic(d, s, l), e)
+
 #endif	/* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
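With __get_user_allowed()/__put_user_allowed() and the user_access_begin()/user_access_end() markers in place, a caller can amortize one open/close pair over several accesses instead of flushing per access. A sketch of the intended usage (hypothetical function; on this tree user_access_begin() is a no-op and user_access_end() performs the flush):

    /* Sketch of the unsafe_*_user() pattern enabled by this patch. */
    static int demo_store_pair(u32 __user *p, u32 a, u32 b)
    {
    	if (!access_ok(VERIFY_WRITE, p, 2 * sizeof(u32)))
    		return -EFAULT;

    	user_access_begin();
    	unsafe_put_user(a, &p[0], efault);	/* no per-access flush */
    	unsafe_put_user(b, &p[1], efault);
    	user_access_end();			/* single flush here */
    	return 0;

    efault:
    	user_access_end();
    	return -EFAULT;
    }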
@@ -487,7 +487,7 @@ EXC_COMMON_BEGIN(unrecover_mce)
	b	1b


-EXC_REAL(data_access, 0x300, 0x380)
+EXC_REAL_OOL(data_access, 0x300, 0x380)
 EXC_VIRT(data_access, 0x4300, 0x4380, 0x300)
 TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)

@@ -519,6 +519,10 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
+	b	tramp_data_access_slb
+EXC_REAL_END(data_access_slb, 0x380, 0x400)
+
+TRAMP_REAL_BEGIN(tramp_data_access_slb)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
@@ -537,7 +541,6 @@ EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400)
	mtctr	r10
	bctr
 #endif
-EXC_REAL_END(data_access_slb, 0x380, 0x400)

 EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x4400)
	SET_SCRATCH0(r13)
@@ -564,7 +567,7 @@ EXC_VIRT_END(data_access_slb, 0x4380, 0x4400)
 TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)


-EXC_REAL(instruction_access, 0x400, 0x480)
+EXC_REAL_OOL(instruction_access, 0x400, 0x480)
 EXC_VIRT(instruction_access, 0x4400, 0x4480, 0x400)
 TRAMP_KVM(PACA_EXGEN, 0x400)

@@ -587,6 +590,10 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
+	b	tramp_instruction_access_slb
+EXC_REAL_END(instruction_access_slb, 0x480, 0x500)
+
+TRAMP_REAL_BEGIN(tramp_instruction_access_slb)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0	/* SRR0 is faulting address */
@@ -600,7 +607,6 @@ EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500)
	mtctr	r10
	bctr
 #endif
-EXC_REAL_END(instruction_access_slb, 0x480, 0x500)

 EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x4500)
	SET_SCRATCH0(r13)
@@ -851,13 +857,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)


 EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x980)
-EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
+EXC_VIRT_OOL_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
 TRAMP_KVM(PACA_EXGEN, 0x900)
 EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)


-EXC_REAL_HV(hdecrementer, 0x980, 0xa00)
-EXC_VIRT_HV(hdecrementer, 0x4980, 0x4a00, 0x980)
+EXC_REAL_OOL_HV(hdecrementer, 0x980, 0xa00)
+EXC_VIRT_OOL_HV(hdecrementer, 0x4980, 0x4a00, 0x980)
 TRAMP_KVM_HV(PACA_EXGEN, 0x980)
 EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)

@@ -1371,6 +1377,48 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
	.endr
	blr

+/* Clobbers r10, r11, ctr */
+.macro L1D_DISPLACEMENT_FLUSH
+	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
+	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+	mtctr	r11
+	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+
+	/* order ld/st prior to dcbt stop all streams with flushing */
+	sync
+
+	/*
+	 * The load adresses are at staggered offsets within cachelines,
+	 * which suits some pipelines better (on others it should not
+	 * hurt).
+	 */
+1:
+	ld	r11,(0x80 + 8)*0(r10)
+	ld	r11,(0x80 + 8)*1(r10)
+	ld	r11,(0x80 + 8)*2(r10)
+	ld	r11,(0x80 + 8)*3(r10)
+	ld	r11,(0x80 + 8)*4(r10)
+	ld	r11,(0x80 + 8)*5(r10)
+	ld	r11,(0x80 + 8)*6(r10)
+	ld	r11,(0x80 + 8)*7(r10)
+	addi	r10,r10,0x80*8
+	bdnz	1b
+.endm
+
+USE_TEXT_SECTION()
+
+_GLOBAL(do_uaccess_flush)
+UACCESS_FLUSH_FIXUP_SECTION
+	nop
+	nop
+	nop
+	blr
+	L1D_DISPLACEMENT_FLUSH
+	blr
+_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
+EXPORT_SYMBOL(do_uaccess_flush)
+
 /*
  * Real mode exceptions actually use this too, but alternate
  * instruction code patches (which end up in the common .text area)
@@ -1626,32 +1674,7 @@ rfi_flush_fallback:
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
-	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
-	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
-	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
-	mtctr	r11
-	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
-
-	/* order ld/st prior to dcbt stop all streams with flushing */
-	sync
-
-	/*
-	 * The load adresses are at staggered offsets within cachelines,
-	 * which suits some pipelines better (on others it should not
-	 * hurt).
-	 */
-1:
-	ld	r11,(0x80 + 8)*0(r10)
-	ld	r11,(0x80 + 8)*1(r10)
-	ld	r11,(0x80 + 8)*2(r10)
-	ld	r11,(0x80 + 8)*3(r10)
-	ld	r11,(0x80 + 8)*4(r10)
-	ld	r11,(0x80 + 8)*5(r10)
-	ld	r11,(0x80 + 8)*6(r10)
-	ld	r11,(0x80 + 8)*7(r10)
-	addi	r10,r10,0x80*8
-	bdnz	1b
-
+	L1D_DISPLACEMENT_FLUSH
	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
@@ -1667,32 +1690,7 @@ hrfi_flush_fallback:
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
-	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
-	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
-	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
-	mtctr	r11
-	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
-
-	/* order ld/st prior to dcbt stop all streams with flushing */
-	sync
-
-	/*
-	 * The load adresses are at staggered offsets within cachelines,
-	 * which suits some pipelines better (on others it should not
-	 * hurt).
-	 */
-1:
-	ld	r11,(0x80 + 8)*0(r10)
-	ld	r11,(0x80 + 8)*1(r10)
-	ld	r11,(0x80 + 8)*2(r10)
-	ld	r11,(0x80 + 8)*3(r10)
-	ld	r11,(0x80 + 8)*4(r10)
-	ld	r11,(0x80 + 8)*5(r10)
-	ld	r11,(0x80 + 8)*6(r10)
-	ld	r11,(0x80 + 8)*7(r10)
-	addi	r10,r10,0x80*8
-	bdnz	1b
-
+	L1D_DISPLACEMENT_FLUSH
	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
@@ -1700,6 +1698,20 @@ hrfi_flush_fallback:
	GET_SCRATCH0(r13);
	hrfid

+	.globl entry_flush_fallback
+entry_flush_fallback:
+	std	r9,PACA_EXRFI+EX_R9(r13)
+	std	r10,PACA_EXRFI+EX_R10(r13)
+	std	r11,PACA_EXRFI+EX_R11(r13)
+	mfctr	r9
+	L1D_DISPLACEMENT_FLUSH
+	mtctr	r9
+	ld	r9,PACA_EXRFI+EX_R9(r13)
+	ld	r10,PACA_EXRFI+EX_R10(r13)
+	ld	r11,PACA_EXRFI+EX_R11(r13)
+	blr
+
+
 /*
  * Called from arch_local_irq_enable when an interrupt needs
  * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
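The displacement flush's loop count comes from the srdi by (7 + 3): the fallback area is walked in 128-byte cache lines (2^7) with the loop body unrolled eight times (2^3). A quick check of that arithmetic as a standalone sketch (not kernel code; the 64 KiB size is an assumed example value for PACA_L1D_FLUSH_SIZE):

    /* Sketch: how L1D_DISPLACEMENT_FLUSH sizes its loop. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned long l1d_flush_size = 64 * 1024;	/* assumed example */
    	unsigned long iterations = l1d_flush_size >> (7 + 3);

    	/* 65536 >> 10 = 64 iterations, each touching 8 x 128 = 1024 bytes */
    	printf("%lu iterations x %u bytes = %lu bytes\n",
    	       iterations, 8 * 128, iterations * 8 * 128);
    	return 0;
    }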
@@ -359,11 +359,9 @@ InstructionTLBMiss:
	/* Load the MI_TWC with the attributes for this "segment." */
	MTSPR_CPU6(SPRN_MI_TWC, r11, r3)	/* Set segment attributes */

-#ifdef CONFIG_SWAP
-	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
+	rlwinm	r11, r10, 32-11, _PAGE_PRESENT
	and	r11, r11, r10
	rlwimi	r10, r11, 0, _PAGE_PRESENT
-#endif
	li	r11, RPN_PATTERN
	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 20-23 and 28 must be clear.
@@ -443,11 +441,9 @@ _ENTRY(DTLBMiss_jmp)
	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
	 * r10 = (r10 & ~PRESENT) | r11;
	 */
-#ifdef CONFIG_SWAP
-	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
+	rlwinm	r11, r10, 32-11, _PAGE_PRESENT
	and	r11, r11, r10
	rlwimi	r10, r11, 0, _PAGE_PRESENT
-#endif
	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 22 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
@@ -685,7 +685,13 @@ early_initcall(disable_hardlockup_detector);
 static enum l1d_flush_type enabled_flush_types;
 static void *l1d_flush_fallback_area;
 static bool no_rfi_flush;
+static bool no_entry_flush;
+static bool no_uaccess_flush;
 bool rfi_flush;
+bool entry_flush;
+bool uaccess_flush;
+DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
+EXPORT_SYMBOL(uaccess_flush_key);

 static int __init handle_no_rfi_flush(char *p)
 {
@@ -695,6 +701,22 @@ static int __init handle_no_rfi_flush(char *p)
 }
 early_param("no_rfi_flush", handle_no_rfi_flush);

+static int __init handle_no_entry_flush(char *p)
+{
+	pr_info("entry-flush: disabled on command line.");
+	no_entry_flush = true;
+	return 0;
+}
+early_param("no_entry_flush", handle_no_entry_flush);
+
+static int __init handle_no_uaccess_flush(char *p)
+{
+	pr_info("uaccess-flush: disabled on command line.");
+	no_uaccess_flush = true;
+	return 0;
+}
+early_param("no_uaccess_flush", handle_no_uaccess_flush);
+
 /*
  * The RFI flush is not KPTI, but because users will see doco that says to use
  * nopti we hijack that option here to also disable the RFI flush.
@@ -726,6 +748,32 @@ void rfi_flush_enable(bool enable)
	rfi_flush = enable;
 }

+void entry_flush_enable(bool enable)
+{
+	if (enable) {
+		do_entry_flush_fixups(enabled_flush_types);
+		on_each_cpu(do_nothing, NULL, 1);
+	} else {
+		do_entry_flush_fixups(L1D_FLUSH_NONE);
+	}
+
+	entry_flush = enable;
+}
+
+void uaccess_flush_enable(bool enable)
+{
+	if (enable) {
+		do_uaccess_flush_fixups(enabled_flush_types);
+		static_branch_enable(&uaccess_flush_key);
+		on_each_cpu(do_nothing, NULL, 1);
+	} else {
+		static_branch_disable(&uaccess_flush_key);
+		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
+	}
+
+	uaccess_flush = enable;
+}
+
 static void __ref init_fallback_flush(void)
 {
	u64 l1d_size, limit;
@@ -771,6 +819,24 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
	rfi_flush_enable(enable);
 }

+void setup_entry_flush(bool enable)
+{
+	if (cpu_mitigations_off())
+		return;
+
+	if (!no_entry_flush)
+		entry_flush_enable(enable);
+}
+
+void setup_uaccess_flush(bool enable)
+{
+	if (cpu_mitigations_off())
+		return;
+
+	if (!no_uaccess_flush)
+		uaccess_flush_enable(enable);
+}
+
 #ifdef CONFIG_DEBUG_FS
 static int rfi_flush_set(void *data, u64 val)
 {
@@ -798,9 +864,63 @@ static int rfi_flush_get(void *data, u64 *val)

 DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

+static int entry_flush_set(void *data, u64 val)
+{
+	bool enable;
+
+	if (val == 1)
+		enable = true;
+	else if (val == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	/* Only do anything if we're changing state */
+	if (enable != entry_flush)
+		entry_flush_enable(enable);
+
+	return 0;
+}
+
+static int entry_flush_get(void *data, u64 *val)
+{
+	*val = entry_flush ? 1 : 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
+
+static int uaccess_flush_set(void *data, u64 val)
+{
+	bool enable;
+
+	if (val == 1)
+		enable = true;
+	else if (val == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	/* Only do anything if we're changing state */
+	if (enable != uaccess_flush)
+		uaccess_flush_enable(enable);
+
+	return 0;
+}
+
+static int uaccess_flush_get(void *data, u64 *val)
+{
+	*val = uaccess_flush ? 1 : 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
+
 static __init int rfi_flush_debugfs_init(void)
 {
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+	debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
+	debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
	return 0;
 }
 device_initcall(rfi_flush_debugfs_init);
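The new debugfs attributes can be flipped at runtime. A small userspace sketch (paths taken from the diff; assumes debugfs is mounted at /sys/kernel/debug) that disables the entry flush, which ends up in entry_flush_set() above with val == 0:

    /* Sketch: toggling the new knob from userspace. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/sys/kernel/debug/powerpc/entry_flush", O_WRONLY);

    	if (fd < 0 || write(fd, "0", 1) != 1)	/* entry_flush_set(.., 0) */
    		perror("entry_flush");
    	if (fd >= 0)
    		close(fd);
    	return 0;
    }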
@@ -140,6 +140,20 @@ SECTIONS
		__stop___stf_entry_barrier_fixup = .;
	}

+	. = ALIGN(8);
+	__uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
+		__start___uaccess_flush_fixup = .;
+		*(__uaccess_flush_fixup)
+		__stop___uaccess_flush_fixup = .;
+	}
+
+	. = ALIGN(8);
+	__entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
+		__start___entry_flush_fixup = .;
+		*(__entry_flush_fixup)
+		__stop___entry_flush_fixup = .;
+	}
+
	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
@@ -29,6 +29,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
	unsigned int csum;

	might_sleep();
+	allow_read_from_user(src, len);

	*err_ptr = 0;

@@ -60,6 +61,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
	}

 out:
+	prevent_read_from_user(src, len);
	return (__force __wsum)csum;
 }
 EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -70,6 +72,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
	unsigned int csum;

	might_sleep();
+	allow_write_to_user(dst, len);

	*err_ptr = 0;

@@ -97,6 +100,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
	}

 out:
+	prevent_write_to_user(dst, len);
	return (__force __wsum)csum;
 }
 EXPORT_SYMBOL(csum_and_copy_to_user);
@@ -232,6 +232,110 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
	do_stf_exit_barrier_fixups(types);
 }

+void do_uaccess_flush_fixups(enum l1d_flush_type types)
+{
+	unsigned int instrs[4], *dest;
+	long *start, *end;
+	int i;
+
+	start = PTRRELOC(&__start___uaccess_flush_fixup);
+	end = PTRRELOC(&__stop___uaccess_flush_fixup);
+
+	instrs[0] = 0x60000000; /* nop */
+	instrs[1] = 0x60000000; /* nop */
+	instrs[2] = 0x60000000; /* nop */
+	instrs[3] = 0x4e800020; /* blr */
+
+	i = 0;
+	if (types == L1D_FLUSH_FALLBACK) {
+		instrs[3] = 0x60000000; /* nop */
+		/* fallthrough to fallback flush */
+	}
+
+	if (types & L1D_FLUSH_ORI) {
+		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+	}
+
+	if (types & L1D_FLUSH_MTTRIG)
+		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+		patch_instruction(dest, instrs[0]);
+
+		patch_instruction((dest + 1), instrs[1]);
+		patch_instruction((dest + 2), instrs[2]);
+		patch_instruction((dest + 3), instrs[3]);
+	}
+
+	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
+		(types == L1D_FLUSH_NONE)       ? "no" :
+		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
+		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
+							? "ori+mttrig type"
+							: "ori type" :
+		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
+						: "unknown");
+}
+
+void do_entry_flush_fixups(enum l1d_flush_type types)
+{
+	unsigned int instrs[3], *dest;
+	long *start, *end;
+	int i;
+
+	start = PTRRELOC(&__start___entry_flush_fixup);
+	end = PTRRELOC(&__stop___entry_flush_fixup);
+
+	instrs[0] = 0x60000000; /* nop */
+	instrs[1] = 0x60000000; /* nop */
+	instrs[2] = 0x60000000; /* nop */
+
+	i = 0;
+	if (types == L1D_FLUSH_FALLBACK) {
+		instrs[i++] = 0x7d4802a6; /* mflr r10 */
+		instrs[i++] = 0x60000000; /* branch patched below */
+		instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+	}
+
+	if (types & L1D_FLUSH_ORI) {
+		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+	}
+
+	if (types & L1D_FLUSH_MTTRIG)
+		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+		patch_instruction(dest, instrs[0]);
+
+		if (types == L1D_FLUSH_FALLBACK)
+			patch_branch((dest + 1), (unsigned long)&entry_flush_fallback,
+				     BRANCH_SET_LINK);
+		else
+			patch_instruction((dest + 1), instrs[1]);
+
+		patch_instruction((dest + 2), instrs[2]);
+	}
+
+	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
+		(types == L1D_FLUSH_NONE)       ? "no" :
+		(types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
+		(types &  L1D_FLUSH_ORI)        ? (types & L1D_FLUSH_MTTRIG)
+							? "ori+mttrig type"
+							: "ori type" :
+		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
+						: "unknown");
+}
+
 void do_rfi_flush_fixups(enum l1d_flush_type types)
 {
	unsigned int instrs[3], *dest;
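Each fixup-section entry stores a PC-relative offset (FTR_ENTRY_OFFSET 959b-960b above), so the patch site is recovered by adding the stored delta to the entry's own address, exactly the `dest = (void *)start + *start` step in the loops. A standalone sketch of that address arithmetic, with hypothetical stand-in names:

    /* Sketch: resolving a relative fixup entry, as in do_uaccess_flush_fixups().
     * "table" and "site" are stand-ins for the linker section and patch slot. */
    #include <stdio.h>

    static long table[1];		/* stands in for __uaccess_flush_fixup */
    static unsigned int site[4];	/* a patchable 4-instruction slot */

    int main(void)
    {
    	/* FTR_ENTRY_OFFSET records "site - entry" at build time. */
    	table[0] = (long)((char *)site - (char *)&table[0]);

    	unsigned int *dest = (unsigned int *)((char *)&table[0] + table[0]);
    	printf("dest == site? %d\n", dest == site);	/* prints 1 */
    	return 0;
    }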
@@ -89,7 +89,7 @@ _GLOBAL(memchr)
 EXPORT_SYMBOL(memchr)

 #ifdef CONFIG_PPC32
-_GLOBAL(__clear_user)
+_GLOBAL(__arch_clear_user)
	addi	r6,r3,-4
	li	r3,0
	li	r5,0
@@ -130,5 +130,5 @@ _GLOBAL(__clear_user)
	PPC_LONG	1b,91b
	PPC_LONG	8b,92b
	.text
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
 #endif
@@ -28,7 +28,7 @@ PPC64_CACHES:
	.section	".text"

 /**
- * __clear_user: - Zero a block of memory in user space, with less checking.
+ * __arch_clear_user: - Zero a block of memory in user space, with less checking.
  * @to:   Destination address, in user space.
  * @n:    Number of bytes to zero.
  *
@@ -78,7 +78,7 @@ err3;	stb	r0,0(r3)
	mr	r3,r4
	blr

-_GLOBAL_TOC(__clear_user)
+_GLOBAL_TOC(__arch_clear_user)
	cmpdi	r4,32
	neg	r6,r3
	li	r0,0
@@ -201,4 +201,4 @@ err1;	dcbz	0,r3
	cmpdi	r4,32
	blt	.Lshort_clear
	b	.Lmedium_clear
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
@@ -124,12 +124,27 @@ static void pnv_setup_rfi_flush(void)
		type = L1D_FLUSH_ORI;
	}

+	/*
+	 * 4.9 doesn't support Power9 bare metal, so we don't need to flush
+	 * here - the flushes fix a P9 specific vulnerability.
+	 */
+	security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+	security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)   || \
		  security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));

	setup_rfi_flush(type, enable);
	setup_count_cache_flush();
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+	setup_entry_flush(enable);
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+	setup_uaccess_flush(enable);
 }

 static void __init pnv_setup_arch(void)
@@ -535,6 +535,14 @@ void pseries_setup_rfi_flush(void)

	setup_rfi_flush(types, enable);
	setup_count_cache_flush();
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+	setup_entry_flush(enable);
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+	setup_uaccess_flush(enable);
 }

 static void __init pSeries_setup_arch(void)
@@ -1117,7 +1117,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
		if (!filter->range || !filter->size)
			return -EOPNOTSUPP;

-		if (!filter->inode) {
+		if (!filter->path.dentry) {
			if (!valid_kernel_ip(filter->offset))
				return -EINVAL;

@@ -1144,7 +1144,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
		return;

	list_for_each_entry(filter, &head->list, entry) {
-		if (filter->inode && !offs[range]) {
+		if (filter->path.dentry && !offs[range]) {
			msr_a = msr_b = 0;
		} else {
			/* apply the offset */
@@ -1248,6 +1248,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
	return 0;
 }

+static bool is_spec_ib_user_controlled(void)
+{
+	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
+}
+
 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
	switch (ctrl) {
@@ -1255,17 +1263,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;
+
		/*
-		 * Indirect branch speculation is always disabled in strict
-		 * mode. It can neither be enabled if it was force-disabled
-		 * by a previous prctl call.
+		 * With strict mode for both IBPB and STIBP, the instruction
+		 * code paths avoid checking this task flag and instead,
+		 * unconditionally run the instruction. However, STIBP and IBPB
+		 * are independent and either can be set to conditionally
+		 * enabled regardless of the mode of the other.
+		 *
+		 * If either is set to conditional, allow the task flag to be
+		 * updated, unless it was force-disabled by a previous prctl
+		 * call. Currently, this is possible on an AMD CPU which has the
+		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
+		 * kernel is booted with 'spectre_v2_user=seccomp', then
+		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
+		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
-		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
-		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;
+
		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
@@ -1278,10 +1295,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;
-		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
-		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+
+		if (!is_spec_ib_user_controlled())
			return 0;
+
		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
@@ -1344,20 +1361,17 @@ static int ib_prctl_get(struct task_struct *task)
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
-	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
-	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
-	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
-		return PR_SPEC_DISABLE;
-	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
-	    spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
-	    spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
-	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
+	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
-	} else
+	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+		return PR_SPEC_DISABLE;
+	else
		return PR_SPEC_NOT_AFFECTED;
 }
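From userspace these paths are reached through the speculation-control prctl, a standard Linux API rather than part of this diff. A small example that force-disables indirect-branch speculation for the calling task; it fails with EPERM exactly when ib_prctl_set() above rejects the request (e.g. strict mode without user control):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
    	/* PR_SPEC_FORCE_DISABLE also blocks later re-enabling, matching
    	 * the task_spec_ib_force_disable() check in ib_prctl_set(). */
    	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
    		  PR_SPEC_FORCE_DISABLE, 0, 0))
    		perror("PR_SET_SPECULATION_CTRL");
    	return 0;
    }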
@@ -3934,6 +3934,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt)
	return X86EMUL_CONTINUE;
 }

+static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
+{
+	/* emulating clflushopt regardless of cpuid */
+	return X86EMUL_CONTINUE;
+}
+
 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
 {
	ctxt->dst.val = (s32) ctxt->src.val;
@@ -4423,7 +4429,7 @@ static const struct opcode group11[] = {
 };

 static const struct gprefix pfx_0f_ae_7 = {
-	I(SrcMem | ByteOp, em_clflush), N, N, N,
+	I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
 };

 static const struct group_dual group15 = { {
@@ -104,7 +104,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,

	switch (gsi) {
	case 0 ... 255:
-		sprintf(ev_name, "_%c%02hhX",
+		sprintf(ev_name, "_%c%02X",
			trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);

		if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
@@ -183,7 +183,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)

 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

-static int do_block_io_op(struct xen_blkif_ring *ring);
+static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req);
@@ -608,6 +608,8 @@ int xen_blkif_schedule(void *arg)
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;
+	bool do_eoi;
+	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	set_freezable();
	while (!kthread_should_stop()) {
@@ -632,16 +634,23 @@ int xen_blkif_schedule(void *arg)
		if (timeout == 0)
			goto purge_gnt_list;

+		do_eoi = ring->waiting_reqs;
+
		ring->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

-		ret = do_block_io_op(ring);
+		ret = do_block_io_op(ring, &eoi_flags);
		if (ret > 0)
			ring->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(ring->shutdown_wq,
						 kthread_should_stop());

+		if (do_eoi && !ring->waiting_reqs) {
+			xen_irq_lateeoi(ring->irq, eoi_flags);
+			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
+		}
+
 purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, ring->next_lru)) {
@@ -1117,7 +1126,7 @@ static void end_block_io_op(struct bio *bio)
 * and transmute  it to the block API to hand it over to the proper block disk.
 */
 static int
-__do_block_io_op(struct xen_blkif_ring *ring)
+__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
 {
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	struct blkif_request req;
@@ -1140,6 +1149,9 @@ __do_block_io_op(struct xen_blkif_ring *ring)
		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

+		/* We've seen a request, so clear spurious eoi flag. */
+		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
@@ -1198,13 +1210,13 @@ done:
 }

 static int
-do_block_io_op(struct xen_blkif_ring *ring)
+do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
 {
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	int more_to_do;

	do {
-		more_to_do = __do_block_io_op(ring);
+		more_to_do = __do_block_io_op(ring, eoi_flags);
		if (more_to_do)
			break;
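The EOI handling above follows the generic lateeoi pattern: remember whether the wakeup came from the event channel, start each pass assuming the event was spurious, clear the spurious flag once real work is seen, and issue the delayed EOI only after the ring is drained. Distilled into a hedged sketch (names taken from the diff; the loop scaffolding and more_work() helper are hypothetical):

    /* Sketch of the lateeoi handling loop, not a drop-in replacement. */
    static void demo_event_loop(struct xen_blkif_ring *ring)
    {
    	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

    	while (more_work(ring)) {		/* hypothetical helper */
    		bool do_eoi = ring->waiting_reqs;

    		ring->waiting_reqs = 0;
    		smp_mb();	/* clear flag before checking for work */

    		if (do_block_io_op(ring, &eoi_flags) > 0)
    			ring->waiting_reqs = 1;	/* ring not drained yet */

    		if (do_eoi && !ring->waiting_reqs) {
    			xen_irq_lateeoi(ring->irq, eoi_flags);
    			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
    		}
    	}
    }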
@@ -236,9 +236,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
		BUG();
	}

-	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
-						    xen_blkif_be_int, 0,
-						    "blkif-backend", ring);
+	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
+			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
	if (err < 0) {
		xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
		ring->blk_rings.common.sring = NULL;
@@ -1212,7 +1212,6 @@ void add_interrupt_randomness(int irq, int irq_flags)

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);
-	this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
@@ -1053,22 +1053,19 @@ static int cik_sdma_soft_reset(void *handle)
 {
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 tmp = RREG32(mmSRBM_STATUS2);
+	u32 tmp;

-	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
-		/* sdma0 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-		tmp |= SDMA0_F32_CNTL__HALT_MASK;
-		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-	}
-	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
-		/* sdma1 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-		tmp |= SDMA0_F32_CNTL__HALT_MASK;
-		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
-	}
+	/* sdma0 */
+	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+	tmp |= SDMA0_F32_CNTL__HALT_MASK;
+	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+	/* sdma1 */
+	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+	tmp |= SDMA0_F32_CNTL__HALT_MASK;
+	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+	srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -350,6 +350,7 @@ int psb_irq_postinstall(struct drm_device *dev)
 {
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
+	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

@@ -362,20 +363,12 @@ int psb_irq_postinstall(struct drm_device *dev)
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

-	if (dev->vblank[0].enabled)
-		psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-	else
-		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
-	if (dev->vblank[1].enabled)
-		psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-	else
-		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
-	if (dev->vblank[2].enabled)
-		psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
-	else
-		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+	for (i = 0; i < dev->num_crtcs; ++i) {
+		if (dev->vblank[i].enabled)
+			psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+		else
+			psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+	}

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, true);
@@ -388,6 +381,7 @@ void psb_irq_uninstall(struct drm_device *dev)
 {
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
+	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

@@ -396,14 +390,10 @@ void psb_irq_uninstall(struct drm_device *dev)

	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

-	if (dev->vblank[0].enabled)
-		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
-
-	if (dev->vblank[1].enabled)
-		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
-
-	if (dev->vblank[2].enabled)
-		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+	for (i = 0; i < dev->num_crtcs; ++i) {
+		if (dev->vblank[i].enabled)
+			psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+	}

	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
				  _PSB_IRQ_MSVDX_FLAG |

@@ -194,6 +194,7 @@ struct imx_i2c_dma {
struct imx_i2c_struct {
    struct i2c_adapter adapter;
    struct clk *clk;
    struct notifier_block clk_change_nb;
    void __iomem *base;
    wait_queue_head_t queue;
    unsigned long i2csr;
@@ -468,15 +469,14 @@ static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx)
    return 0;
}

static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
                            unsigned int i2c_clk_rate)
{
    struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
    unsigned int i2c_clk_rate;
    unsigned int div;
    int i;

    /* Divider value calculation */
    i2c_clk_rate = clk_get_rate(i2c_imx->clk);
    if (i2c_imx->cur_clk == i2c_clk_rate)
        return;

@@ -511,6 +511,20 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
#endif
}

static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
                                     unsigned long action, void *data)
{
    struct clk_notifier_data *ndata = data;
    struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk,
                                                  struct imx_i2c_struct,
                                                  clk);

    if (action & POST_RATE_CHANGE)
        i2c_imx_set_clk(i2c_imx, ndata->new_rate);

    return NOTIFY_OK;
}

static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
{
    unsigned int temp = 0;
@@ -518,8 +532,6 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)

    dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);

    i2c_imx_set_clk(i2c_imx);

    imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR);
    /* Enable I2C controller */
    imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
@@ -1099,14 +1111,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
        return ret;
    }

    /* Request IRQ */
    ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
                           pdev->name, i2c_imx);
    if (ret) {
        dev_err(&pdev->dev, "can't claim irq %d\n", irq);
        goto clk_disable;
    }

    /* Init queue */
    init_waitqueue_head(&i2c_imx->queue);

@@ -1125,12 +1129,23 @@ static int i2c_imx_probe(struct platform_device *pdev)
    if (ret < 0)
        goto rpm_disable;

    /* Request IRQ */
    ret = request_threaded_irq(irq, i2c_imx_isr, NULL, IRQF_SHARED,
                               pdev->name, i2c_imx);
    if (ret) {
        dev_err(&pdev->dev, "can't claim irq %d\n", irq);
        goto rpm_disable;
    }

    /* Set up clock divider */
    i2c_imx->bitrate = IMX_I2C_BIT_RATE;
    ret = of_property_read_u32(pdev->dev.of_node,
                               "clock-frequency", &i2c_imx->bitrate);
    if (ret < 0 && pdata && pdata->bitrate)
        i2c_imx->bitrate = pdata->bitrate;
    i2c_imx->clk_change_nb.notifier_call = i2c_imx_clk_notifier_call;
    clk_notifier_register(i2c_imx->clk, &i2c_imx->clk_change_nb);
    i2c_imx_set_clk(i2c_imx, clk_get_rate(i2c_imx->clk));

    /* Set up chip registers to defaults */
    imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
@@ -1141,12 +1156,12 @@ static int i2c_imx_probe(struct platform_device *pdev)
    ret = i2c_imx_init_recovery_info(i2c_imx, pdev);
    /* Give it another chance if pinctrl used is not ready yet */
    if (ret == -EPROBE_DEFER)
        goto rpm_disable;
        goto clk_notifier_unregister;

    /* Add I2C adapter */
    ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
    if (ret < 0)
        goto rpm_disable;
        goto clk_notifier_unregister;

    pm_runtime_mark_last_busy(&pdev->dev);
    pm_runtime_put_autosuspend(&pdev->dev);
@@ -1162,13 +1177,14 @@ static int i2c_imx_probe(struct platform_device *pdev)

    return 0;   /* Return OK */

clk_notifier_unregister:
    clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
    free_irq(irq, i2c_imx);
rpm_disable:
    pm_runtime_put_noidle(&pdev->dev);
    pm_runtime_disable(&pdev->dev);
    pm_runtime_set_suspended(&pdev->dev);
    pm_runtime_dont_use_autosuspend(&pdev->dev);

clk_disable:
    clk_disable_unprepare(i2c_imx->clk);
    return ret;
}
@@ -1176,7 +1192,7 @@ clk_disable:
static int i2c_imx_remove(struct platform_device *pdev)
{
    struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
    int ret;
    int irq, ret;

    ret = pm_runtime_get_sync(&pdev->dev);
    if (ret < 0)
@@ -1195,6 +1211,10 @@ static int i2c_imx_remove(struct platform_device *pdev)
    imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
    imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);

    clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
    irq = platform_get_irq(pdev, 0);
    if (irq >= 0)
        free_irq(irq, i2c_imx);
    clk_disable_unprepare(i2c_imx->clk);

    pm_runtime_put_noidle(&pdev->dev);
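
The i2c-imx hunks above move divider setup into a clock-rate notifier so the bus divider tracks I2C clock changes instead of re-reading the rate on every transfer start. Below is a minimal userspace sketch of that notifier pattern; the struct layout and rate values are made up for illustration, and it recovers its context from the embedded notifier_block, whereas the hunk above recovers it from the embedded clk pointer.

```c
#include <stdio.h>
#include <stddef.h>

#define POST_RATE_CHANGE 0x2
#define NOTIFY_OK        0x1

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block {
    int (*notifier_call)(struct notifier_block *nb,
                         unsigned long action, void *data);
};

struct clk_notifier_data {
    unsigned long old_rate;
    unsigned long new_rate;
};

/* hypothetical driver context embedding the notifier block */
struct fake_i2c_dev {
    unsigned long cur_rate;
    struct notifier_block clk_change_nb;
};

static int clk_notifier_call(struct notifier_block *nb,
                             unsigned long action, void *data)
{
    struct clk_notifier_data *ndata = data;
    /* recover the enclosing device from the embedded member */
    struct fake_i2c_dev *dev =
        container_of(nb, struct fake_i2c_dev, clk_change_nb);

    if (action & POST_RATE_CHANGE)
        dev->cur_rate = ndata->new_rate;  /* re-derive divider here */

    return NOTIFY_OK;
}

int main(void)
{
    struct fake_i2c_dev dev = { .cur_rate = 66000000 };
    struct clk_notifier_data nd = { 66000000, 24000000 };

    dev.clk_change_nb.notifier_call = clk_notifier_call;
    dev.clk_change_nb.notifier_call(&dev.clk_change_nb,
                                    POST_RATE_CHANGE, &nd);
    printf("rate now %lu\n", dev.cur_rate);
    return 0;
}
```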

@@ -96,6 +96,10 @@ static const struct chip_desc chips[] = {
        .nchans = 4,
        .muxtype = pca954x_isswi,
    },
    [pca_9546] = {
        .nchans = 4,
        .muxtype = pca954x_isswi,
    },
    [pca_9547] = {
        .nchans = 8,
        .enable = 0x8,
@@ -113,7 +117,7 @@ static const struct i2c_device_id pca954x_id[] = {
    { "pca9543", pca_9543 },
    { "pca9544", pca_9544 },
    { "pca9545", pca_9545 },
    { "pca9546", pca_9545 },
    { "pca9546", pca_9546 },
    { "pca9547", pca_9547 },
    { "pca9548", pca_9548 },
    { }
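
The pca954x fix gives the pca9546 its own chip_desc entry instead of aliasing it to pca_9545. A hypothetical sketch of the id-table-to-descriptor indexing it corrects is below; the field names are illustrative, and in this kernel the two descriptors happen to be identical, so the fix mainly guards against the descriptors diverging later.

```c
#include <stdio.h>

enum chip_id { pca_9545, pca_9546, pca_9547 };

struct chip_desc { int nchans; int is_switch; };

static const struct chip_desc chips[] = {
    [pca_9545] = { .nchans = 4, .is_switch = 1 },
    [pca_9546] = { .nchans = 4, .is_switch = 1 },  /* the added entry */
    [pca_9547] = { .nchans = 8, .is_switch = 0 },
};

struct dev_id { const char *name; enum chip_id id; };

static const struct dev_id ids[] = {
    { "pca9545", pca_9545 },
    { "pca9546", pca_9546 },  /* mapped to pca_9545 before the fix */
    { "pca9547", pca_9547 },
};

int main(void)
{
    /* the name string selects the descriptor through the enum index */
    for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
        printf("%s -> %d channels, switch=%d\n", ids[i].name,
               chips[ids[i].id].nchans, chips[ids[i].id].is_switch);
    return 0;
}
```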

@@ -115,7 +115,8 @@ static irqreturn_t sunkbd_interrupt(struct serio *serio,
    switch (data) {

    case SUNKBD_RET_RESET:
        schedule_work(&sunkbd->tq);
        if (sunkbd->enabled)
            schedule_work(&sunkbd->tq);
        sunkbd->reset = -1;
        break;

@@ -216,16 +217,12 @@ static int sunkbd_initialize(struct sunkbd *sunkbd)
}

/*
 * sunkbd_reinit() sets leds and beeps to a state the computer remembers they
 * were in.
 * sunkbd_set_leds_beeps() sets leds and beeps to a state the computer remembers
 * they were in.
 */

static void sunkbd_reinit(struct work_struct *work)
static void sunkbd_set_leds_beeps(struct sunkbd *sunkbd)
{
    struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);

    wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);

    serio_write(sunkbd->serio, SUNKBD_CMD_SETLED);
    serio_write(sunkbd->serio,
                (!!test_bit(LED_CAPSL, sunkbd->dev->led) << 3) |
@@ -238,11 +235,39 @@ static void sunkbd_reinit(struct work_struct *work)
                SUNKBD_CMD_BELLOFF - !!test_bit(SND_BELL, sunkbd->dev->snd));
}


/*
 * sunkbd_reinit() wait for the keyboard reset to complete and restores state
 * of leds and beeps.
 */

static void sunkbd_reinit(struct work_struct *work)
{
    struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);

    /*
     * It is OK that we check sunkbd->enabled without pausing serio,
     * as we only want to catch true->false transition that will
     * happen once and we will be woken up for it.
     */
    wait_event_interruptible_timeout(sunkbd->wait,
                                     sunkbd->reset >= 0 || !sunkbd->enabled,
                                     HZ);

    if (sunkbd->reset >= 0 && sunkbd->enabled)
        sunkbd_set_leds_beeps(sunkbd);
}

static void sunkbd_enable(struct sunkbd *sunkbd, bool enable)
{
    serio_pause_rx(sunkbd->serio);
    sunkbd->enabled = enable;
    serio_continue_rx(sunkbd->serio);

    if (!enable) {
        wake_up_interruptible(&sunkbd->wait);
        cancel_work_sync(&sunkbd->tq);
    }
}

/*
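
The sunkbd change makes the reinit worker bail out when the device is disabled mid-reset, and has the teardown path wake the waiter before synchronizing, which closes the use-after-free window. A small pthread model of that ordering is below, with all names hypothetical; pthread_join stands in for cancel_work_sync().

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_q = PTHREAD_COND_INITIALIZER;
static int  reset_done = -1;   /* -1 = reset still in flight */
static bool enabled = true;

static void *reinit_worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    /* wait for reset completion OR for the device being disabled */
    while (reset_done < 0 && enabled)
        pthread_cond_wait(&wait_q, &lock);

    if (reset_done >= 0 && enabled)
        puts("restoring leds/beeps");  /* safe: device still enabled */
    else
        puts("disabled during reset; skipping device access");
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t worker;

    pthread_create(&worker, NULL, reinit_worker, NULL);

    /* teardown path: flip the flag, wake the waiter, then sync */
    pthread_mutex_lock(&lock);
    enabled = false;
    pthread_cond_broadcast(&wait_q);
    pthread_mutex_unlock(&lock);
    pthread_join(worker, NULL);    /* stands in for cancel_work_sync() */
    return 0;
}
```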

@@ -383,7 +383,11 @@ extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

#define MAX_IRQS_PER_TABLE 256
/*
 * AMD IOMMU hardware only support 512 IRTEs despite
 * the architectural limitation of 2048 entries.
 */
#define MAX_IRQS_PER_TABLE 512
#define IRQ_TABLE_ALIGNMENT 128

struct irq_remap_table {

@@ -152,11 +152,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl)
 *
 * @cl: host client
 *
 * Return: mtu
 * Return: mtu or 0 if client is not connected
 */
static inline size_t mei_cl_mtu(const struct mei_cl *cl)
{
    return cl->me_cl->props.max_msg_length;
    return cl->me_cl ? cl->me_cl->props.max_msg_length : 0;
}

/**

@@ -469,9 +469,13 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
         */
        struct sk_buff *skb = priv->echo_skb[idx];
        struct canfd_frame *cf = (struct canfd_frame *)skb->data;
        u8 len = cf->len;

        *len_ptr = len;
        /* get the real payload length for netdev statistics */
        if (cf->can_id & CAN_RTR_FLAG)
            *len_ptr = 0;
        else
            *len_ptr = cf->len;

        priv->echo_skb[idx] = NULL;

        return skb;
@@ -496,7 +500,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
    if (!skb)
        return 0;

    netif_rx(skb);
    skb_get(skb);
    if (netif_rx(skb) == NET_RX_SUCCESS)
        dev_consume_skb_any(skb);
    else
        dev_kfree_skb_any(skb);

    return len;
}
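
The can_get_echo_skb() hunk pins the skb with skb_get() before netif_rx(), so the last reference is always dropped through the context-safe dev_*_skb_any() helpers rather than inside netif_rx()'s drop path. A toy refcount model of that dance, with buf/deliver as stand-ins rather than kernel API:

```c
#include <stdio.h>
#include <stdlib.h>

struct buf { int refs; int len; };

static struct buf *buf_get(struct buf *b) { b->refs++; return b; }

static void buf_put(struct buf *b)
{
    if (--b->refs == 0)
        free(b);
}

/* stands in for netif_rx(): always consumes one reference */
static int deliver(struct buf *b)
{
    int ok = 1;  /* pretend the queue accepted it */
    buf_put(b);
    return ok;
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));

    b->refs = 1;
    b->len = 8;

    buf_get(b);          /* extra ref, like skb_get() */
    if (deliver(b))      /* consumes the original reference */
        puts("consumed: drop our ref via the safe helper");
    else
        puts("dropped: free our ref via the safe helper");
    buf_put(b);          /* our reference is the one freed here */
    return 0;
}
```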

@@ -152,14 +152,55 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
    /* protect from getting timeval before setting now */
    if (time_ref->tv_host.tv_sec > 0) {
        u64 delta_us;
        s64 delta_ts = 0;

        delta_us = ts - time_ref->ts_dev_2;
        if (ts < time_ref->ts_dev_2)
            delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
        /* General case: dev_ts_1 < dev_ts_2 < ts, with:
         *
         * - dev_ts_1 = previous sync timestamp
         * - dev_ts_2 = last sync timestamp
         * - ts = event timestamp
         * - ts_period = known sync period (theoretical)
         *             ~ dev_ts2 - dev_ts1
         * *but*:
         *
         * - time counters wrap (see adapter->ts_used_bits)
         * - sometimes, dev_ts_1 < ts < dev_ts2
         *
         * "normal" case (sync time counters increase):
         * must take into account case when ts wraps (tsw)
         *
         *      < ts_period >           <     >
         *     |             |           |
         *  ---+--------+----+-------0-+--+-->
         *     ts_dev_1 |    ts_dev_2  |
         *              ts             tsw
         */
        if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
            /* case when event time (tsw) wraps */
            if (ts < time_ref->ts_dev_1)
                delta_ts = 1 << time_ref->adapter->ts_used_bits;

        delta_us += time_ref->ts_total;
        /* Otherwise, sync time counter (ts_dev_2) has wrapped:
         * handle case when event time (tsn) hasn't.
         *
         *      < ts_period >           <     >
         *     |             |           |
         *  ---+--------+--0-+---------+--+-->
         *     ts_dev_1 |    ts_dev_2  |
         *              tsn            ts
         */
        } else if (time_ref->ts_dev_1 < ts) {
            delta_ts = -(1 << time_ref->adapter->ts_used_bits);
        }

        delta_us *= time_ref->adapter->us_per_ts_scale;
        /* add delay between last sync and event timestamps */
        delta_ts += (signed int)(ts - time_ref->ts_dev_2);

        /* add time from beginning to last sync */
        delta_ts += time_ref->ts_total;

        /* convert ticks number into microseconds */
        delta_us = delta_ts * time_ref->adapter->us_per_ts_scale;
        delta_us >>= time_ref->adapter->us_per_ts_shift;

        *tv = time_ref->tv_host_0;
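
A worked example of the wrap handling above, assuming a hypothetical 16-bit device timestamp: a wide signed difference plus an explicit +/- 2^16 correction recovers the true tick delta whether the event counter or the sync counter wrapped.

```c
#include <stdio.h>
#include <stdint.h>

#define TS_BITS 16

static int64_t tick_delta(uint16_t ts_dev_1, uint16_t ts_dev_2, uint16_t ts)
{
    int64_t delta_ts = 0;

    if (ts_dev_1 < ts_dev_2) {      /* sync counters still rising */
        if (ts < ts_dev_1)          /* ...but the event time wrapped */
            delta_ts = 1 << TS_BITS;
    } else if (ts_dev_1 < ts) {     /* sync wrapped, event didn't */
        delta_ts = -(1 << TS_BITS);
    }

    /* delay between the last sync and the event, in a wide type */
    delta_ts += (int32_t)ts - (int32_t)ts_dev_2;
    return delta_ts;
}

int main(void)
{
    /* event wrapped past 0: 18 ticks after the sync at 0xfff0 */
    printf("wrapped event:  %lld ticks\n",
           (long long)tick_delta(0xff00, 0xfff0, 0x0002));
    /* plain case: 8 ticks after the same sync */
    printf("normal event:   %lld ticks\n",
           (long long)tick_delta(0xff00, 0xfff0, 0xfff8));
    return 0;
}
```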

@@ -475,12 +475,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
                                     struct pucan_msg *rx_msg)
{
    struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
    struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
    struct net_device *netdev = dev->netdev;
    struct peak_usb_device *dev;
    struct net_device *netdev;
    struct canfd_frame *cfd;
    struct sk_buff *skb;
    const u16 rx_msg_flags = le16_to_cpu(rm->flags);

    if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev))
        return -ENOMEM;

    dev = usb_if->dev[pucan_msg_get_channel(rm)];
    netdev = dev->netdev;

    if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
        /* CANFD frame case */
        skb = alloc_canfd_skb(netdev, &cfd);
@@ -527,15 +533,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
                                     struct pucan_msg *rx_msg)
{
    struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
    struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
    struct pcan_usb_fd_device *pdev =
        container_of(dev, struct pcan_usb_fd_device, dev);
    struct pcan_usb_fd_device *pdev;
    enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
    enum can_state rx_state, tx_state;
    struct net_device *netdev = dev->netdev;
    struct peak_usb_device *dev;
    struct net_device *netdev;
    struct can_frame *cf;
    struct sk_buff *skb;

    if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev))
        return -ENOMEM;

    dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
    pdev = container_of(dev, struct pcan_usb_fd_device, dev);
    netdev = dev->netdev;

    /* nothing should be sent while in BUS_OFF state */
    if (dev->can.state == CAN_STATE_BUS_OFF)
        return 0;
@@ -588,9 +600,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
                                    struct pucan_msg *rx_msg)
{
    struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
    struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
    struct pcan_usb_fd_device *pdev =
        container_of(dev, struct pcan_usb_fd_device, dev);
    struct pcan_usb_fd_device *pdev;
    struct peak_usb_device *dev;

    if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev))
        return -EINVAL;

    dev = usb_if->dev[pucan_ermsg_get_channel(er)];
    pdev = container_of(dev, struct pcan_usb_fd_device, dev);

    /* keep a trace of tx and rx error counters for later use */
    pdev->bec.txerr = er->tx_err_cnt;
@@ -604,11 +621,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
                                      struct pucan_msg *rx_msg)
{
    struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
    struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
    struct net_device *netdev = dev->netdev;
    struct peak_usb_device *dev;
    struct net_device *netdev;
    struct can_frame *cf;
    struct sk_buff *skb;

    if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev))
        return -EINVAL;

    dev = usb_if->dev[pufd_omsg_get_channel(ov)];
    netdev = dev->netdev;

    /* allocate an skb to store the error frame */
    skb = alloc_can_err_skb(netdev, &cf);
    if (!skb)
@@ -726,6 +749,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
    u16 tx_msg_size, tx_msg_flags;
    u8 can_dlc;

    if (cfd->len > CANFD_MAX_DLEN)
        return -EINVAL;

    tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
    tx_msg->size = cpu_to_le16(tx_msg_size);
    tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);

@@ -188,7 +188,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
                                            u8 qid)
                                            u16 qid)
{
    struct i40e_pf *pf = vf->pf;
    struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
@@ -203,7 +203,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
    struct i40e_pf *pf = vf->pf;

@@ -417,11 +417,28 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
    u32 v_idx, i, reg_idx, reg;
    u32 next_q_idx, next_q_type;
    u32 msix_vf, size;
    int ret = 0;

    msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

    if (qvlist_info->num_vectors > msix_vf) {
        dev_warn(&pf->pdev->dev,
                 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
                 qvlist_info->num_vectors,
                 msix_vf);
        ret = -EINVAL;
        goto err_out;
    }

    size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
           (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
           (qvlist_info->num_vectors - 1));
    kfree(vf->qvlist_info);
    vf->qvlist_info = kzalloc(size, GFP_KERNEL);
    if (!vf->qvlist_info) {
        ret = -ENOMEM;
        goto err_out;
    }
    vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

    msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -432,8 +449,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
        v_idx = qv_info->v_idx;

        /* Validate vector id belongs to this vf */
        if (!i40e_vc_isvalid_vector_id(vf, v_idx))
            goto err;
        if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
            ret = -EINVAL;
            goto err_free;
        }

        vf->qvlist_info->qv_info[i] = *qv_info;

@@ -475,10 +494,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
    }

    return 0;
err:
err_free:
    kfree(vf->qvlist_info);
    vf->qvlist_info = NULL;
    return -EINVAL;
err_out:
    return ret;
}

/**
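
The i40e hunk enforces validate-before-allocate: the guest-supplied vector count is checked against the MSI-X limit before it is used to size the kernel copy. A minimal userspace sketch of that ordering, with the limit value and struct layout made up for illustration:

```c
#include <stdio.h>
#include <stdlib.h>

struct qv_info { unsigned int v_idx; };

struct qvlist_info {
    unsigned int num_vectors;
    struct qv_info qv_info[];
};

static void *config_qvlist(const struct qvlist_info *in, unsigned int msix_max)
{
    struct qvlist_info *copy;
    size_t size;

    if (in->num_vectors > msix_max) {   /* validate first */
        fprintf(stderr, "bad vector count %u (max %u)\n",
                in->num_vectors, msix_max);
        return NULL;
    }

    size = sizeof(*copy) + in->num_vectors * sizeof(struct qv_info);
    copy = calloc(1, size);             /* size is now trustworthy */
    if (copy)
        copy->num_vectors = in->num_vectors;
    return copy;
}

int main(void)
{
    struct qvlist_info req = { .num_vectors = 4 };

    free(config_qvlist(&req, 5));   /* fits: allocation proceeds */
    req.num_vectors = 4096;
    config_qvlist(&req, 5);         /* rejected before sizing */
    return 0;
}
```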

@@ -732,7 +732,8 @@ free_dst:
static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
                                       struct net_device *dev,
                                       struct flowi4 *fl4,
                                       struct ip_tunnel_info *info)
                                       struct ip_tunnel_info *info,
                                       __be16 dport, __be16 sport)
{
    bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
    struct geneve_dev *geneve = netdev_priv(dev);
@@ -746,6 +747,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
    memset(fl4, 0, sizeof(*fl4));
    fl4->flowi4_mark = skb->mark;
    fl4->flowi4_proto = IPPROTO_UDP;
    fl4->fl4_dport = dport;
    fl4->fl4_sport = sport;

    if (info) {
        fl4->daddr = info->key.u.ipv4.dst;
@@ -791,7 +794,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
                                           struct net_device *dev,
                                           struct flowi6 *fl6,
                                           struct ip_tunnel_info *info)
                                           struct ip_tunnel_info *info,
                                           __be16 dport, __be16 sport)
{
    bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
    struct geneve_dev *geneve = netdev_priv(dev);
@@ -807,6 +811,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
    memset(fl6, 0, sizeof(*fl6));
    fl6->flowi6_mark = skb->mark;
    fl6->flowi6_proto = IPPROTO_UDP;
    fl6->fl6_dport = dport;
    fl6->fl6_sport = sport;

    if (info) {
        fl6->daddr = info->key.u.ipv6.dst;
@@ -894,13 +900,14 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        goto tx_error;
    }

    rt = geneve_get_v4_rt(skb, dev, &fl4, info);
    sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
    rt = geneve_get_v4_rt(skb, dev, &fl4, info,
                          geneve->dst_port, sport);
    if (IS_ERR(rt)) {
        err = PTR_ERR(rt);
        goto tx_error;
    }

    sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
    skb_reset_mac_header(skb);

    if (info) {
@@ -983,13 +990,14 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        }
    }

    dst = geneve_get_v6_dst(skb, dev, &fl6, info);
    sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
    dst = geneve_get_v6_dst(skb, dev, &fl6, info,
                            geneve->dst_port, sport);
    if (IS_ERR(dst)) {
        err = PTR_ERR(dst);
        goto tx_error;
    }

    sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
    skb_reset_mac_header(skb);

    if (info) {
@@ -1114,9 +1122,14 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
    struct dst_entry *dst;
    struct flowi6 fl6;
#endif
    __be16 sport;

    if (ip_tunnel_info_af(info) == AF_INET) {
        rt = geneve_get_v4_rt(skb, dev, &fl4, info);
        sport = udp_flow_src_port(geneve->net, skb,
                                  1, USHRT_MAX, true);

        rt = geneve_get_v4_rt(skb, dev, &fl4, info,
                              geneve->dst_port, sport);
        if (IS_ERR(rt))
            return PTR_ERR(rt);

@@ -1124,7 +1137,11 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
        info->key.u.ipv4.src = fl4.saddr;
#if IS_ENABLED(CONFIG_IPV6)
    } else if (ip_tunnel_info_af(info) == AF_INET6) {
        dst = geneve_get_v6_dst(skb, dev, &fl6, info);
        sport = udp_flow_src_port(geneve->net, skb,
                                  1, USHRT_MAX, true);

        dst = geneve_get_v6_dst(skb, dev, &fl6, info,
                                geneve->dst_port, sport);
        if (IS_ERR(dst))
            return PTR_ERR(dst);

@@ -1135,8 +1152,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
        return -EINVAL;
    }

    info->key.tp_src = udp_flow_src_port(geneve->net, skb,
                                         1, USHRT_MAX, true);
    info->key.tp_src = sport;
    info->key.tp_dst = geneve->dst_port;
    return 0;
}
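
The geneve hunks pick the UDP source port before the route lookup and pass it into the flow key. A hypothetical illustration of why the old order was wrong: if the port participates in the flow key, an ECMP-style hash over that key must see the same value the packet is later sent with, or lookup and transmission can disagree. The hash here is a toy mix, not the kernel's flow dissector.

```c
#include <stdio.h>
#include <stdint.h>

struct flow_key { uint32_t saddr, daddr; uint16_t sport, dport; };

static unsigned int pick_path(const struct flow_key *k, unsigned int npaths)
{
    /* toy mixing function standing in for a real flow hash */
    uint32_t h = k->saddr ^ (k->daddr * 2654435761u) ^
                 ((uint32_t)k->sport << 16) ^ k->dport;
    h ^= h >> 13;
    return h % npaths;
}

int main(void)
{
    struct flow_key k = { .saddr = 0x0a000001, .daddr = 0x0a000002,
                          .sport = 0, .dport = 6081 };

    /* broken order: lookup runs while sport is still unset */
    unsigned int before = pick_path(&k, 4);
    k.sport = 33333;            /* chosen only after the lookup */
    unsigned int after = pick_path(&k, 4);

    printf("lookup saw path %u, packet hashes to %u\n", before, after);
    return 0;
}
```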

@@ -903,6 +903,7 @@ static ssize_t cosa_write(struct file *file,
            chan->tx_status = 1;
            spin_unlock_irqrestore(&cosa->lock, flags);
            up(&chan->wsem);
            kfree(kbuf);
            return -ERESTARTSYS;
        }
    }

@@ -972,7 +972,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
    struct ath_htc_rx_status *rxstatus;
    struct ath_rx_status rx_stats;
    bool decrypt_error = false;
    __be16 rs_datalen;
    u16 rs_datalen;
    bool is_phyerr;

    if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {

@@ -140,6 +140,20 @@ struct xenvif_queue { /* Per-queue data for xenvif */
    char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
    struct xenvif *vif; /* Parent VIF */

    /*
     * TX/RX common EOI handling.
     * When feature-split-event-channels = 0, interrupt handler sets
     * NETBK_COMMON_EOI, otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
     * by the RX and TX interrupt handlers.
     * RX and TX handler threads will issue an EOI when either
     * NETBK_COMMON_EOI or their specific bits (NETBK_RX_EOI or
     * NETBK_TX_EOI) are set and they will reset those bits.
     */
    atomic_t eoi_pending;
#define NETBK_RX_EOI     0x01
#define NETBK_TX_EOI     0x02
#define NETBK_COMMON_EOI 0x04

    /* Use NAPI for guest TX */
    struct napi_struct napi;
    /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
@@ -356,6 +370,7 @@ int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);


@@ -76,12 +76,28 @@ int xenvif_schedulable(struct xenvif *vif)
        !vif->disabled;
}

static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
    bool rc;

    rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
    if (rc)
        napi_schedule(&queue->napi);
    return rc;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
    struct xenvif_queue *queue = dev_id;
    int old;

    if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
        napi_schedule(&queue->napi);
    old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
    WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

    if (!xenvif_handle_tx_interrupt(queue)) {
        atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
        xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
    }

    return IRQ_HANDLED;
}
@@ -115,19 +131,46 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
    return work_done;
}

static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
    bool rc;

    rc = xenvif_have_rx_work(queue, false);
    if (rc)
        xenvif_kick_thread(queue);
    return rc;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
    struct xenvif_queue *queue = dev_id;
    int old;

    xenvif_kick_thread(queue);
    old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
    WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

    if (!xenvif_handle_rx_interrupt(queue)) {
        atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
        xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
    }

    return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
    xenvif_tx_interrupt(irq, dev_id);
    xenvif_rx_interrupt(irq, dev_id);
    struct xenvif_queue *queue = dev_id;
    int old;

    old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
    WARN(old, "Interrupt while EOI pending\n");

    /* Use bitwise or as we need to call both functions. */
    if ((!xenvif_handle_tx_interrupt(queue) |
         !xenvif_handle_rx_interrupt(queue))) {
        atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
        xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
    }

    return IRQ_HANDLED;
}
@@ -583,7 +626,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
    shared = (struct xen_netif_ctrl_sring *)addr;
    BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

    err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
    err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
    if (err < 0)
        goto err_unmap;

@@ -641,7 +684,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,

    if (tx_evtchn == rx_evtchn) {
        /* feature-split-event-channels == 0 */
        err = bind_interdomain_evtchn_to_irqhandler(
        err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
            queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
            queue->name, queue);
        if (err < 0)
@@ -652,7 +695,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
        /* feature-split-event-channels == 1 */
        snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                 "%s-tx", queue->name);
        err = bind_interdomain_evtchn_to_irqhandler(
        err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
            queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
            queue->tx_irq_name, queue);
        if (err < 0)
@@ -662,7 +705,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,

        snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                 "%s-rx", queue->name);
        err = bind_interdomain_evtchn_to_irqhandler(
        err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
            queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
            queue->rx_irq_name, queue);
        if (err < 0)
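
The xen-netback handlers above follow one pattern: set a pending bit before looking for work; if none was found, clear the bit and EOI immediately as spurious, otherwise leave the EOI to the processing path once it drains the queue. A compact userspace model of that bookkeeping using C11 atomics, with all names and the single TX bit chosen for illustration:

```c
#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

#define TX_EOI 0x02

static atomic_uint eoi_pending;

static void xen_irq_lateeoi(const char *why) { printf("EOI (%s)\n", why); }

/* stands in for xenvif_handle_tx_interrupt(): true if work was queued */
static bool handle_tx(bool have_work) { return have_work; }

static void tx_interrupt(bool have_work)
{
    atomic_fetch_or(&eoi_pending, TX_EOI);

    if (!handle_tx(have_work)) {
        atomic_fetch_and(&eoi_pending, ~TX_EOI);
        xen_irq_lateeoi("spurious, immediate");
    }
    /* else: the NAPI/kthread path clears TX_EOI and EOIs later */
}

static void tx_work_done(void)
{
    if (atomic_fetch_and(&eoi_pending, ~TX_EOI) & TX_EOI)
        xen_irq_lateeoi("after processing");
}

int main(void)
{
    tx_interrupt(false);  /* no requests: EOI at once */
    tx_interrupt(true);   /* real work: EOI deferred... */
    tx_work_done();       /* ...until the queue is drained */
    return 0;
}
```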

@@ -162,6 +162,10 @@ void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)

    if (more_to_do)
        napi_schedule(&queue->napi);
    else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
                                 &queue->eoi_pending) &
             (NETBK_TX_EOI | NETBK_COMMON_EOI))
        xen_irq_lateeoi(queue->tx_irq, 0);
}

static void tx_add_credit(struct xenvif_queue *queue)
@@ -1615,9 +1619,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
    struct xenvif *vif = data;
    unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;

    while (xenvif_ctrl_work_todo(vif))
    while (xenvif_ctrl_work_todo(vif)) {
        xenvif_ctrl_action(vif);
        eoi_flag = 0;
    }

    xen_irq_lateeoi(irq, eoi_flag);

    return IRQ_HANDLED;
}

@@ -490,13 +490,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
    return queue->stalled && prod - cons >= 1;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
    return xenvif_rx_ring_slots_available(queue) ||
           (queue->vif->stall_timeout &&
            (xenvif_rx_queue_stalled(queue) ||
             xenvif_rx_queue_ready(queue))) ||
           kthread_should_stop() ||
           (test_kthread && kthread_should_stop()) ||
           queue->vif->disabled;
}

@@ -527,15 +527,20 @@ static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
    DEFINE_WAIT(wait);

    if (xenvif_have_rx_work(queue))
    if (xenvif_have_rx_work(queue, true))
        return;

    for (;;) {
        long ret;

        prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
        if (xenvif_have_rx_work(queue))
        if (xenvif_have_rx_work(queue, true))
            break;
        if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
                                &queue->eoi_pending) &
            (NETBK_RX_EOI | NETBK_COMMON_EOI))
            xen_irq_lateeoi(queue->rx_irq, 0);

        ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
        if (!ret)
            break;

@@ -901,11 +901,13 @@ EXPORT_SYMBOL_GPL(of_dma_get_range);
 */
bool of_dma_is_coherent(struct device_node *np)
{
    struct device_node *node = of_node_get(np);
    struct device_node *node;

    if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
        return true;

    node = of_node_get(np);

    while (node) {
        if (of_property_read_bool(node, "dma-coherent")) {
            of_node_put(node);

@@ -387,13 +387,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
{
    /*
     * The signal type is GPIO if the signal name has "GPIO" as a prefix.
     * The signal type is GPIO if the signal name has "GPI" as a prefix.
     * strncmp (rather than strcmp) is used to implement the prefix
     * requirement.
     *
     * expr->signal might look like "GPIOT3" in the GPIO case.
     * expr->signal might look like "GPIOB1" in the GPIO case.
     * expr->signal might look like "GPIT0" in the GPI case.
     */
    return strncmp(expr->signal, "GPIO", 4) == 0;
    return strncmp(expr->signal, "GPI", 3) == 0;
}

static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)

@@ -40,6 +40,13 @@ struct pinctrl_dt_map {
static void dt_free_map(struct pinctrl_dev *pctldev,
                        struct pinctrl_map *map, unsigned num_maps)
{
    int i;

    for (i = 0; i < num_maps; ++i) {
        kfree_const(map[i].dev_name);
        map[i].dev_name = NULL;
    }

    if (pctldev) {
        const struct pinctrl_ops *ops = pctldev->desc->pctlops;
        ops->dt_free_map(pctldev, map, num_maps);
@@ -73,7 +80,13 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,

    /* Initialize common mapping table entry fields */
    for (i = 0; i < num_maps; i++) {
        map[i].dev_name = dev_name(p->dev);
        const char *devname;

        devname = kstrdup_const(dev_name(p->dev), GFP_KERNEL);
        if (!devname)
            goto err_free_map;

        map[i].dev_name = devname;
        map[i].name = statename;
        if (pctldev)
            map[i].ctrl_dev_name = dev_name(pctldev->dev);
@@ -81,11 +94,8 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,

    /* Remember the converted mapping table entries */
    dt_map = kzalloc(sizeof(*dt_map), GFP_KERNEL);
    if (!dt_map) {
        dev_err(p->dev, "failed to alloc struct pinctrl_dt_map\n");
        dt_free_map(pctldev, map, num_maps);
        return -ENOMEM;
    }
    if (!dt_map)
        goto err_free_map;

    dt_map->pctldev = pctldev;
    dt_map->map = map;
@@ -93,6 +103,10 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
    list_add_tail(&dt_map->node, &p->dt_maps);

    return pinctrl_register_map(map, num_maps, false);

err_free_map:
    dt_free_map(pctldev, map, num_maps);
    return -ENOMEM;
}

struct pinctrl_dev *of_pinctrl_get(struct device_node *np)

@@ -140,7 +140,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
            pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
            pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
        } else if (debounce < 250000) {
            time = debounce / 15600;
            time = debounce / 15625;
            pin_reg |= time & DB_TMR_OUT_MASK;
            pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
            pin_reg |= BIT(DB_TMR_LARGE_OFF);
@@ -150,14 +150,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
            pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
            pin_reg |= BIT(DB_TMR_LARGE_OFF);
        } else {
            pin_reg &= ~DB_CNTRl_MASK;
            pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
            ret = -EINVAL;
        }
    } else {
        pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
        pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
        pin_reg &= ~DB_TMR_OUT_MASK;
        pin_reg &= ~DB_CNTRl_MASK;
        pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
    }
    writel(pin_reg, gpio_dev->base + offset * 4);
    spin_unlock_irqrestore(&gpio_dev->lock, flags);

@@ -3185,6 +3185,8 @@ static int _regulator_get_voltage(struct regulator_dev *rdev)
        ret = rdev->desc->fixed_uV;
    } else if (rdev->supply) {
        ret = _regulator_get_voltage(rdev->supply->rdev);
    } else if (rdev->supply_name) {
        return -EPROBE_DEFER;
    } else {
        return -EINVAL;
    }

@@ -657,8 +657,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                    rcu_read_lock();
                    list_for_each_entry_rcu(h,
                                            &tmp_pg->dh_list, node) {
                        /* h->sdev should always be valid */
                        BUG_ON(!h->sdev);
                        if (!h->sdev)
                            continue;
                        h->sdev->access_state = desc[0];
                    }
                    rcu_read_unlock();
@@ -704,7 +704,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                pg->expiry = 0;
                rcu_read_lock();
                list_for_each_entry_rcu(h, &pg->dh_list, node) {
                    BUG_ON(!h->sdev);
                    if (!h->sdev)
                        continue;
                    h->sdev->access_state =
                        (pg->state & SCSI_ACCESS_STATE_MASK);
                    if (pg->pref)
@@ -1149,7 +1150,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
    spin_lock(&h->pg_lock);
    pg = h->pg;
    rcu_assign_pointer(h->pg, NULL);
    h->sdev = NULL;
    spin_unlock(&h->pg_lock);
    if (pg) {
        spin_lock_irq(&pg->lock);
@@ -1158,6 +1158,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
        kref_put(&pg->kref, release_port_group);
    }
    sdev->handler_data = NULL;
    synchronize_rcu();
    kfree(h);
}

@@ -8937,7 +8937,7 @@ reinit_after_soft_reset:
    /* hook into SCSI subsystem */
    rc = hpsa_scsi_add_host(h);
    if (rc)
        goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
        goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */

    /* Monitor the controller for firmware lockups */
    h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -8949,6 +8949,8 @@ reinit_after_soft_reset:
                            h->heartbeat_sample_interval);
    return 0;

clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
    kfree(h->lastlogicals);
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
    hpsa_free_performant_mode(h);
    h->access.set_intr_mask(h, HPSA_INTR_OFF);

@@ -1648,6 +1648,15 @@ static const struct usb_device_id acm_ids[] = {
    { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
    .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
    },
    { USB_DEVICE(0x045b, 0x023c),  /* Renesas USB Download mode */
    .driver_info = DISABLE_ECHO,   /* Don't echo banner */
    },
    { USB_DEVICE(0x045b, 0x0248),  /* Renesas USB Download mode */
    .driver_info = DISABLE_ECHO,   /* Don't echo banner */
    },
    { USB_DEVICE(0x045b, 0x024D),  /* Renesas USB Download mode */
    .driver_info = DISABLE_ECHO,   /* Don't echo banner */
    },
    { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
    .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
    },

@@ -1772,6 +1772,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        goto err;
    }

    pci_set_drvdata(pdev, dev);
    spin_lock_init(&dev->lock);
    dev->pdev = pdev;
    dev->gadget.ops = &goku_ops;
@@ -1805,7 +1806,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    }
    dev->regs = (struct goku_udc_regs __iomem *) base;

    pci_set_drvdata(pdev, dev);
    INFO(dev, "%s\n", driver_desc);
    INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
    INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);

@@ -90,6 +90,8 @@ static void evtchn_2l_unmask(unsigned port)

    BUG_ON(!irqs_disabled());

    smp_wmb();  /* All writes before unmask must be visible. */

    if (unlikely((cpu != cpu_from_evtchn(port))))
        do_hypercall = 1;
    else {
@@ -158,7 +160,7 @@ static inline xen_ulong_t active_evtchns(unsigned int cpu,
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void evtchn_2l_handle_events(unsigned cpu)
static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
    int irq;
    xen_ulong_t pending_words;
@@ -239,10 +241,7 @@ static void evtchn_2l_handle_events(unsigned cpu)

            /* Process port. */
            port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
            irq = get_evtchn_to_irq(port);

            if (irq != -1)
                generic_handle_irq(irq);
            handle_irq_for_port(port, ctrl);

            bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;


@@ -32,6 +32,10 @@
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/atomic.h>
#include <linux/ktime.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
@@ -62,6 +66,15 @@

#include "events_internal.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static uint __read_mostly event_loop_timeout = 2;
module_param(event_loop_timeout, uint, 0644);

static uint __read_mostly event_eoi_delay = 10;
module_param(event_eoi_delay, uint, 0644);

const struct evtchn_ops *evtchn_ops;

/*
@@ -70,6 +83,24 @@ const struct evtchn_ops *evtchn_ops;
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

/*
 * Lock protecting event handling loop against removing event channels.
 * Adding of event channels is no issue as the associated IRQ becomes active
 * only after everything is setup (before request_[threaded_]irq() the handler
 * can't be entered for an event, as the event channel will be unmasked only
 * then).
 */
static DEFINE_RWLOCK(evtchn_rwlock);

/*
 * Lock hierarchy:
 *
 * irq_mapping_update_lock
 *   evtchn_rwlock
 *     IRQ-desc lock
 *       percpu eoi_list_lock
 */

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */

@@ -94,17 +125,20 @@ static bool (*pirq_needs_eoi)(unsigned irq);
static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static DEFINE_PER_CPU(unsigned int, irq_epoch);

static void clear_evtchn_to_irq_row(unsigned row)
{
    unsigned col;

    for (col = 0; col < EVTCHN_PER_ROW; col++)
        evtchn_to_irq[row][col] = -1;
        WRITE_ONCE(evtchn_to_irq[row][col], -1);
}

static void clear_evtchn_to_irq_all(void)
@@ -141,7 +175,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
        clear_evtchn_to_irq_row(row);
    }

    evtchn_to_irq[row][col] = irq;
    WRITE_ONCE(evtchn_to_irq[row][col], irq);
    return 0;
}

@@ -151,7 +185,7 @@ int get_evtchn_to_irq(unsigned evtchn)
        return -1;
    if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
        return -1;
    return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
    return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
}

/* Get info for IRQ */
@@ -260,10 +294,14 @@ static void xen_irq_info_cleanup(struct irq_info *info)
 */
unsigned int evtchn_from_irq(unsigned irq)
{
    if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
    const struct irq_info *info = NULL;

    if (likely(irq < nr_irqs))
        info = info_for_irq(irq);
    if (!info)
        return 0;

    return info_for_irq(irq)->evtchn;
    return info->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)

@@ -382,9 +420,157 @@ void notify_remote_via_irq(int irq)
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

struct lateeoi_work {
    struct delayed_work delayed;
    spinlock_t eoi_list_lock;
    struct list_head eoi_list;
};

static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);

static void lateeoi_list_del(struct irq_info *info)
{
    struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
    unsigned long flags;

    spin_lock_irqsave(&eoi->eoi_list_lock, flags);
    list_del_init(&info->eoi_list);
    spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}

static void lateeoi_list_add(struct irq_info *info)
{
    struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
    struct irq_info *elem;
    u64 now = get_jiffies_64();
    unsigned long delay;
    unsigned long flags;

    if (now < info->eoi_time)
        delay = info->eoi_time - now;
    else
        delay = 1;

    spin_lock_irqsave(&eoi->eoi_list_lock, flags);

    if (list_empty(&eoi->eoi_list)) {
        list_add(&info->eoi_list, &eoi->eoi_list);
        mod_delayed_work_on(info->eoi_cpu, system_wq,
                            &eoi->delayed, delay);
    } else {
        list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
            if (elem->eoi_time <= info->eoi_time)
                break;
        }
        list_add(&info->eoi_list, &elem->eoi_list);
    }

    spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}

static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
{
    evtchn_port_t evtchn;
    unsigned int cpu;
    unsigned int delay = 0;

    evtchn = info->evtchn;
    if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
        return;

    if (spurious) {
        if ((1 << info->spurious_cnt) < (HZ << 2))
            info->spurious_cnt++;
        if (info->spurious_cnt > 1) {
            delay = 1 << (info->spurious_cnt - 2);
            if (delay > HZ)
                delay = HZ;
            if (!info->eoi_time)
                info->eoi_cpu = smp_processor_id();
            info->eoi_time = get_jiffies_64() + delay;
        }
    } else {
        info->spurious_cnt = 0;
    }

    cpu = info->eoi_cpu;
    if (info->eoi_time &&
        (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
        lateeoi_list_add(info);
        return;
    }

    info->eoi_time = 0;
    unmask_evtchn(evtchn);
}

static void xen_irq_lateeoi_worker(struct work_struct *work)
{
    struct lateeoi_work *eoi;
    struct irq_info *info;
    u64 now = get_jiffies_64();
    unsigned long flags;

    eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);

    read_lock_irqsave(&evtchn_rwlock, flags);

    while (true) {
        spin_lock(&eoi->eoi_list_lock);

        info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
                                        eoi_list);

        if (info == NULL || now < info->eoi_time) {
            spin_unlock(&eoi->eoi_list_lock);
            break;
        }

        list_del_init(&info->eoi_list);

        spin_unlock(&eoi->eoi_list_lock);

        info->eoi_time = 0;

        xen_irq_lateeoi_locked(info, false);
    }

    if (info)
        mod_delayed_work_on(info->eoi_cpu, system_wq,
                            &eoi->delayed, info->eoi_time - now);

    read_unlock_irqrestore(&evtchn_rwlock, flags);
}

static void xen_cpu_init_eoi(unsigned int cpu)
{
    struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);

    INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
    spin_lock_init(&eoi->eoi_list_lock);
    INIT_LIST_HEAD(&eoi->eoi_list);
}

void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
    struct irq_info *info;
    unsigned long flags;

    read_lock_irqsave(&evtchn_rwlock, flags);

    info = info_for_irq(irq);

    if (info)
        xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);

    read_unlock_irqrestore(&evtchn_rwlock, flags);
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
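
The spurious-event handling above grows the unmask delay exponentially. A worked example of that arithmetic, assuming a hypothetical HZ of 250: from the second spurious event on, the delay is 1 << (spurious_cnt - 2) jiffies, capped at HZ, and the counter itself stops growing once 1 << spurious_cnt reaches 4*HZ.

```c
#include <stdio.h>

#define HZ 250  /* assumed tick rate for illustration */

int main(void)
{
    unsigned int spurious_cnt = 0;

    for (int ev = 1; ev <= 12; ev++) {
        unsigned int delay = 0;

        /* same update rule as xen_irq_lateeoi_locked() above */
        if ((1u << spurious_cnt) < (HZ << 2))
            spurious_cnt++;
        if (spurious_cnt > 1) {
            delay = 1u << (spurious_cnt - 2);
            if (delay > HZ)
                delay = HZ;
        }
        printf("spurious #%d -> unmask delayed %u jiffies\n", ev, delay);
    }
    return 0;
}
```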

static void xen_irq_init(unsigned irq)
{
    struct irq_info *info;

#ifdef CONFIG_SMP
    /* By default all event channels notify CPU#0. */
    cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
@@ -399,6 +585,7 @@ static void xen_irq_init(unsigned irq)

    set_info_for_irq(irq, info);

    INIT_LIST_HEAD(&info->eoi_list);
    list_add_tail(&info->list, &xen_irq_list_head);
}

@@ -447,16 +634,24 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
static void xen_free_irq(unsigned irq)
{
    struct irq_info *info = info_for_irq(irq);
    unsigned long flags;

    if (WARN_ON(!info))
        return;

    write_lock_irqsave(&evtchn_rwlock, flags);

    if (!list_empty(&info->eoi_list))
        lateeoi_list_del(info);

    list_del(&info->list);

    set_info_for_irq(irq, NULL);

    WARN_ON(info->refcnt > 0);

    write_unlock_irqrestore(&evtchn_rwlock, flags);

    kfree(info);

    /* Legacy IRQ descriptors are managed by the arch. */
@@ -848,7 +1043,7 @@ int xen_pirq_from_irq(unsigned irq)
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
{
    int irq;
    int ret;
@@ -865,7 +1060,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
        if (irq < 0)
            goto out;

        irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
        irq_set_chip_and_handler_name(irq, chip,
                                      handle_edge_irq, "event");

        ret = xen_irq_info_evtchn_setup(irq, evtchn);
@@ -886,8 +1081,19 @@ out:

    return irq;
}

int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
    return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
{
    return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
    struct evtchn_bind_ipi bind_ipi;
@@ -929,8 +1135,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
    return irq;
}

int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
                                   unsigned int remote_port)
static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
                                               evtchn_port_t remote_port,
                                               struct irq_chip *chip)
{
    struct evtchn_bind_interdomain bind_interdomain;
    int err;
@@ -941,10 +1148,26 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
    err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                      &bind_interdomain);

    return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
    return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
                                           chip);
}

int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
                                   evtchn_port_t remote_port)
{
    return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
                                               &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);

int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
                                           evtchn_port_t remote_port)
{
    return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
                                               &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);

static int find_virq(unsigned int virq, unsigned int cpu)
{
    struct evtchn_status status;
@@ -1040,14 +1263,15 @@ static void unbind_from_irq(unsigned int irq)
    mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
                                          irq_handler_t handler,
                                          unsigned long irqflags,
                                          const char *devname, void *dev_id,
                                          struct irq_chip *chip)
{
    int irq, retval;

    irq = bind_evtchn_to_irq(evtchn);
    irq = bind_evtchn_to_irq_chip(evtchn, chip);
    if (irq < 0)
        return irq;
    retval = request_irq(irq, handler, irqflags, devname, dev_id);
@@ -1058,31 +1282,76 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,

    return irq;
}

int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
    return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
                                          devname, dev_id,
                                          &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
                                      irq_handler_t handler,
                                      unsigned long irqflags,
                                      const char *devname, void *dev_id)
{
    return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
                                          devname, dev_id,
                                          &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);

static int bind_interdomain_evtchn_to_irqhandler_chip(
        unsigned int remote_domain, evtchn_port_t remote_port,
        irq_handler_t handler, unsigned long irqflags,
        const char *devname, void *dev_id, struct irq_chip *chip)
{
    int irq, retval;

    irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
                                              chip);
    if (irq < 0)
        return irq;

    retval = request_irq(irq, handler, irqflags, devname, dev_id);
    if (retval != 0) {
        unbind_from_irq(irq);
        return retval;
    }

    return irq;
}

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
                                          unsigned int remote_port,
                                          evtchn_port_t remote_port,
                                          irq_handler_t handler,
                                          unsigned long irqflags,
                                          const char *devname,
                                          void *dev_id)
{
    int irq, retval;

    irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
    if (irq < 0)
        return irq;

    retval = request_irq(irq, handler, irqflags, devname, dev_id);
    if (retval != 0) {
        unbind_from_irq(irq);
        return retval;
    }

    return irq;
    return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
                remote_port, handler, irqflags, devname,
                dev_id, &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
                                                  evtchn_port_t remote_port,
                                                  irq_handler_t handler,
                                                  unsigned long irqflags,
                                                  const char *devname,
                                                  void *dev_id)
{
    return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
                remote_port, handler, irqflags, devname,
                dev_id, &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
@@ -1195,7 +1464,7 @@ int evtchn_get(unsigned int evtchn)
        goto done;

    err = -EINVAL;
    if (info->refcnt <= 0)
    if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
        goto done;

    info->refcnt++;
@@ -1234,6 +1503,54 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
    notify_remote_via_irq(irq);
}

struct evtchn_loop_ctrl {
    ktime_t timeout;
    unsigned count;
    bool defer_eoi;
};

void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
{
    int irq;
    struct irq_info *info;

    irq = get_evtchn_to_irq(port);
    if (irq == -1)
        return;

    /*
     * Check for timeout every 256 events.
     * We are setting the timeout value only after the first 256
     * events in order to not hurt the common case of few loop
     * iterations. The 256 is basically an arbitrary value.
     *
     * In case we are hitting the timeout we need to defer all further
     * EOIs in order to ensure to leave the event handling loop rather
     * sooner than later.
     */
    if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
        ktime_t kt = ktime_get();

        if (!ctrl->timeout.tv64) {
            kt = ktime_add_ms(kt,
                              jiffies_to_msecs(event_loop_timeout));
            ctrl->timeout = kt;
        } else if (kt.tv64 > ctrl->timeout.tv64) {
            ctrl->defer_eoi = true;
        }
    }

    info = info_for_irq(irq);

    if (ctrl->defer_eoi) {
        info->eoi_cpu = smp_processor_id();
        info->irq_epoch = __this_cpu_read(irq_epoch);
        info->eoi_time = get_jiffies_64() + event_eoi_delay;
    }

    generic_handle_irq(irq);
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
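
handle_irq_for_port() above checks the clock only every 256 events, then flips to deferred EOIs once the budget is exhausted so the loop terminates promptly under an event storm. A userspace model of that guard, assuming a 2-jiffy budget at a hypothetical 250 Hz tick:

```c
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

struct loop_ctrl { long long timeout_ns; unsigned int count; bool defer_eoi; };

static long long now_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void handle_one_event(struct loop_ctrl *ctrl)
{
    /* check the clock only once per 256 events, like the hunk above */
    if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
        long long t = now_ns();

        if (!ctrl->timeout_ns)
            ctrl->timeout_ns = t + 8000000;  /* ~2 jiffies @ 250 Hz */
        else if (t > ctrl->timeout_ns)
            ctrl->defer_eoi = true;
    }

    /* process the event; EOI inline unless we are over budget */
}

int main(void)
{
    struct loop_ctrl ctrl = { 0 };

    for (int i = 0; i < 100000; i++)
        handle_one_event(&ctrl);
    printf("deferring EOIs: %s\n", ctrl.defer_eoi ? "yes" : "no");
    return 0;
}
```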
static void __xen_evtchn_do_upcall(void)
|
||||
@@ -1241,6 +1558,9 @@ static void __xen_evtchn_do_upcall(void)
|
||||
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
|
||||
int cpu = get_cpu();
|
||||
unsigned count;
|
||||
struct evtchn_loop_ctrl ctrl = { 0 };
|
||||
|
||||
read_lock(&evtchn_rwlock);
|
||||
|
||||
do {
|
||||
vcpu_info->evtchn_upcall_pending = 0;
|
||||
@@ -1248,7 +1568,7 @@ static void __xen_evtchn_do_upcall(void)
|
||||
if (__this_cpu_inc_return(xed_nesting_count) - 1)
|
||||
goto out;
|
||||
|
||||
xen_evtchn_handle_events(cpu);
|
||||
xen_evtchn_handle_events(cpu, &ctrl);
|
||||
|
||||
BUG_ON(!irqs_disabled());
|
||||
|
||||
@@ -1257,6 +1577,14 @@ static void __xen_evtchn_do_upcall(void)
|
||||
} while (count != 1 || vcpu_info->evtchn_upcall_pending);
|
||||
|
||||
out:
|
||||
read_unlock(&evtchn_rwlock);
|
||||
|
||||
/*
|
||||
* Increment irq_epoch only now to defer EOIs only for
|
||||
* xen_irq_lateeoi() invocations occurring from inside the loop
|
||||
* above.
|
||||
*/
|
||||
__this_cpu_inc(irq_epoch);
|
||||
|
||||
put_cpu();
|
||||
}
@@ -1613,6 +1941,21 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
.irq_retrigger = retrigger_dynirq,
};

static struct irq_chip xen_lateeoi_chip __read_mostly = {
/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
.name = "xen-dyn-lateeoi",

.irq_disable = disable_dynirq,
.irq_mask = disable_dynirq,
.irq_unmask = enable_dynirq,

.irq_ack = mask_ack_dynirq,
.irq_mask_ack = mask_ack_dynirq,

.irq_set_affinity = set_affinity_irq,
.irq_retrigger = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
.name = "xen-pirq",

@@ -1680,12 +2023,31 @@ void xen_callback_vector(void)
void xen_callback_vector(void) {}
#endif

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static bool fifo_events = true;
module_param(fifo_events, bool, 0);

static int xen_evtchn_cpu_prepare(unsigned int cpu)
{
int ret = 0;

xen_cpu_init_eoi(cpu);

if (evtchn_ops->percpu_init)
ret = evtchn_ops->percpu_init(cpu);

return ret;
}

static int xen_evtchn_cpu_dead(unsigned int cpu)
{
int ret = 0;

if (evtchn_ops->percpu_deinit)
ret = evtchn_ops->percpu_deinit(cpu);

return ret;
}

void __init xen_init_IRQ(void)
{
int ret = -EINVAL;
@@ -1695,6 +2057,12 @@ void __init xen_init_IRQ(void)
if (ret < 0)
xen_evtchn_2l_init();

xen_cpu_init_eoi(smp_processor_id());

cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
"CPUHP_XEN_EVTCHN_PREPARE",
xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);

evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
sizeof(*evtchn_to_irq), GFP_KERNEL);
BUG_ON(!evtchn_to_irq);

@@ -227,19 +227,25 @@ static bool evtchn_fifo_is_masked(unsigned port)
return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}
/*
* Clear MASKED, spinning if BUSY is set.
* Clear MASKED if not PENDING, spinning if BUSY is set.
* Return true if mask was cleared.
*/
static void clear_masked(volatile event_word_t *word)
static bool clear_masked_cond(volatile event_word_t *word)
{
event_word_t new, old, w;

w = *word;

do {
if (w & (1 << EVTCHN_FIFO_PENDING))
return false;

old = w & ~(1 << EVTCHN_FIFO_BUSY);
new = old & ~(1 << EVTCHN_FIFO_MASKED);
w = sync_cmpxchg(word, old, new);
} while (w != old);

return true;
}
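
The rewritten helper is a classic compare-and-swap loop: it refuses to clear MASKED once PENDING is observed, and simply retries while the hypervisor holds BUSY. A hedged userspace model of the same pattern using C11 atomics; the flag names are stand-ins, not the EVTCHN_FIFO_* bits:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define F_PENDING (1u << 0)
#define F_BUSY    (1u << 1)
#define F_MASKED  (1u << 2)

/* Clear F_MASKED only while F_PENDING is not set; the CAS fails and
 * retries whenever F_BUSY flips underneath us. */
static bool clear_masked_cond(_Atomic uint32_t *word)
{
    uint32_t w = atomic_load(word);

    for (;;) {
        uint32_t old, new;

        if (w & F_PENDING)
            return false;          /* went pending: leave MASKED alone */

        old = w & ~F_BUSY;         /* compare against the not-busy value */
        new = old & ~F_MASKED;
        if (atomic_compare_exchange_strong(word, &old, new))
            return true;
        w = old;                   /* old now holds the observed value */
    }
}
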

static void evtchn_fifo_unmask(unsigned port)
@@ -248,8 +254,7 @@ static void evtchn_fifo_unmask(unsigned port)

BUG_ON(!irqs_disabled());

clear_masked(word);
if (evtchn_fifo_is_pending(port)) {
if (!clear_masked_cond(word)) {
struct evtchn_unmask unmask = { .port = port };
(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
}
@@ -270,19 +275,9 @@ static uint32_t clear_linked(volatile event_word_t *word)
return w & EVTCHN_FIFO_LINK_MASK;
}

static void handle_irq_for_port(unsigned port)
{
int irq;

irq = get_evtchn_to_irq(port);
if (irq != -1)
generic_handle_irq(irq);
}

static void consume_one_event(unsigned cpu,
static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,
struct evtchn_fifo_control_block *control_block,
unsigned priority, unsigned long *ready,
bool drop)
unsigned priority, unsigned long *ready)
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
@@ -315,16 +310,17 @@ static void consume_one_event(unsigned cpu,
clear_bit(priority, ready);

if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
if (unlikely(drop))
if (unlikely(!ctrl))
pr_warn("Dropping pending event for port %u\n", port);
else
handle_irq_for_port(port);
handle_irq_for_port(port, ctrl);
}

q->head[priority] = head;
}

static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
static void __evtchn_fifo_handle_events(unsigned cpu,
struct evtchn_loop_ctrl *ctrl)
{
struct evtchn_fifo_control_block *control_block;
unsigned long ready;
@@ -336,14 +332,15 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)

while (ready) {
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
consume_one_event(cpu, control_block, q, &ready, drop);
consume_one_event(cpu, ctrl, control_block, q, &ready);
ready |= xchg(&control_block->ready, 0);
}
}

static void evtchn_fifo_handle_events(unsigned cpu)
static void evtchn_fifo_handle_events(unsigned cpu,
struct evtchn_loop_ctrl *ctrl)
{
__evtchn_fifo_handle_events(cpu, false);
__evtchn_fifo_handle_events(cpu, ctrl);
}

static void evtchn_fifo_resume(void)
@@ -381,21 +378,6 @@ static void evtchn_fifo_resume(void)
event_array_pages = 0;
}

static const struct evtchn_ops evtchn_ops_fifo = {
.max_channels = evtchn_fifo_max_channels,
.nr_channels = evtchn_fifo_nr_channels,
.setup = evtchn_fifo_setup,
.bind_to_cpu = evtchn_fifo_bind_to_cpu,
.clear_pending = evtchn_fifo_clear_pending,
.set_pending = evtchn_fifo_set_pending,
.is_pending = evtchn_fifo_is_pending,
.test_and_set_mask = evtchn_fifo_test_and_set_mask,
.mask = evtchn_fifo_mask,
.unmask = evtchn_fifo_unmask,
.handle_events = evtchn_fifo_handle_events,
.resume = evtchn_fifo_resume,
};

static int evtchn_fifo_alloc_control_block(unsigned cpu)
{
void *control_block = NULL;
@@ -418,19 +400,36 @@ static int evtchn_fifo_alloc_control_block(unsigned cpu)
return ret;
}

static int xen_evtchn_cpu_prepare(unsigned int cpu)
static int evtchn_fifo_percpu_init(unsigned int cpu)
{
if (!per_cpu(cpu_control_block, cpu))
return evtchn_fifo_alloc_control_block(cpu);
return 0;
}

static int xen_evtchn_cpu_dead(unsigned int cpu)
static int evtchn_fifo_percpu_deinit(unsigned int cpu)
{
__evtchn_fifo_handle_events(cpu, true);
__evtchn_fifo_handle_events(cpu, NULL);
return 0;
}

static const struct evtchn_ops evtchn_ops_fifo = {
.max_channels = evtchn_fifo_max_channels,
.nr_channels = evtchn_fifo_nr_channels,
.setup = evtchn_fifo_setup,
.bind_to_cpu = evtchn_fifo_bind_to_cpu,
.clear_pending = evtchn_fifo_clear_pending,
.set_pending = evtchn_fifo_set_pending,
.is_pending = evtchn_fifo_is_pending,
.test_and_set_mask = evtchn_fifo_test_and_set_mask,
.mask = evtchn_fifo_mask,
.unmask = evtchn_fifo_unmask,
.handle_events = evtchn_fifo_handle_events,
.resume = evtchn_fifo_resume,
.percpu_init = evtchn_fifo_percpu_init,
.percpu_deinit = evtchn_fifo_percpu_deinit,
};

int __init xen_evtchn_fifo_init(void)
{
int cpu = get_cpu();
@@ -444,9 +443,6 @@ int __init xen_evtchn_fifo_init(void)

evtchn_ops = &evtchn_ops_fifo;

cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
"CPUHP_XEN_EVTCHN_PREPARE",
xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
out:
put_cpu();
return ret;

@@ -32,11 +32,16 @@ enum xen_irq_type {
*/
struct irq_info {
struct list_head list;
int refcnt;
struct list_head eoi_list;
short refcnt;
short spurious_cnt;
enum xen_irq_type type; /* type */
unsigned irq;
unsigned int evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
unsigned short eoi_cpu; /* EOI must happen on this cpu */
unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
u64 eoi_time; /* Time in jiffies when to EOI. */

union {
unsigned short virq;
@@ -55,6 +60,8 @@ struct irq_info {
#define PIRQ_SHAREABLE (1 << 1)
#define PIRQ_MSI_GROUP (1 << 2)

struct evtchn_loop_ctrl;

struct evtchn_ops {
unsigned (*max_channels)(void);
unsigned (*nr_channels)(void);
@@ -69,14 +76,18 @@ struct evtchn_ops {
void (*mask)(unsigned port);
void (*unmask)(unsigned port);

void (*handle_events)(unsigned cpu);
void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
void (*resume)(void);

int (*percpu_init)(unsigned int cpu);
int (*percpu_deinit)(unsigned int cpu);
};

extern const struct evtchn_ops *evtchn_ops;

extern int **evtchn_to_irq;
int get_evtchn_to_irq(unsigned int evtchn);
void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);

struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
@@ -134,9 +145,10 @@ static inline void unmask_evtchn(unsigned port)
return evtchn_ops->unmask(port);
}

static inline void xen_evtchn_handle_events(unsigned cpu)
static inline void xen_evtchn_handle_events(unsigned cpu,
struct evtchn_loop_ctrl *ctrl)
{
return evtchn_ops->handle_events(cpu);
return evtchn_ops->handle_events(cpu, ctrl);
}

static inline void xen_evtchn_resume(void)

@@ -178,7 +178,6 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
"Interrupt for port %d, but apparently not enabled; per-user %p\n",
evtchn->port, u);

disable_irq_nosync(irq);
evtchn->enabled = false;

spin_lock(&u->ring_prod_lock);
@@ -304,7 +303,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
evtchn = find_evtchn(u, port);
if (evtchn && !evtchn->enabled) {
evtchn->enabled = true;
enable_irq(irq_from_evtchn(port));
xen_irq_lateeoi(irq_from_evtchn(port), 0);
}
}

@@ -404,8 +403,8 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
if (rc < 0)
goto err;

rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
u->name, evtchn);
rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
u->name, evtchn);
if (rc < 0)
goto err;


@@ -733,10 +733,17 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
wmb();
notify_remote_via_irq(pdev->evtchn_irq);

/* Enable IRQ to signal "request done". */
xen_pcibk_lateeoi(pdev, 0);

ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
!(test_bit(_XEN_PCIB_active, (unsigned long *)
&sh_info->flags)), 300*HZ);

/* Enable IRQ for pcifront request if not already active. */
if (!test_bit(_PDEVF_op_active, &pdev->flags))
xen_pcibk_lateeoi(pdev, 0);

if (!ret) {
if (test_bit(_XEN_PCIB_active,
(unsigned long *)&sh_info->flags)) {
@@ -750,13 +757,6 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
}
clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

if (test_bit(_XEN_PCIF_active,
(unsigned long *)&sh_info->flags)) {
dev_dbg(&psdev->dev->dev,
"schedule pci_conf service in " DRV_NAME "\n");
xen_pcibk_test_and_schedule_op(psdev->pdev);
}

res = (pci_ers_result_t)aer_op->err;
return res;
}

@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <xen/events.h>
#include <xen/interface/io/pciif.h>

#define DRV_NAME "xen-pciback"
@@ -26,6 +27,8 @@ struct pci_dev_entry {
#define PDEVF_op_active (1<<(_PDEVF_op_active))
#define _PCIB_op_pending (1)
#define PCIB_op_pending (1<<(_PCIB_op_pending))
#define _EOI_pending (2)
#define EOI_pending (1<<(_EOI_pending))

struct xen_pcibk_device {
void *pci_dev_data;
@@ -181,12 +184,17 @@ static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
void xen_pcibk_do_op(struct work_struct *data);

static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev,
unsigned int eoi_flag)
{
if (test_and_clear_bit(_EOI_pending, &pdev->flags))
xen_irq_lateeoi(pdev->evtchn_irq, eoi_flag);
}

int xen_pcibk_xenbus_register(void);
void xen_pcibk_xenbus_unregister(void);

extern int verbose_request;

void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
#endif
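
The new xen_pcibk_lateeoi() helper encodes a simple ownership rule: the interrupt handler records that an EOI is owed by setting _EOI_pending, and whichever path finishes the work clears the bit and sends the EOI exactly once. A minimal model of that idiom with C11 atomics; the types and the EOI call are stubs, not the kernel's:

#include <stdatomic.h>

#define EOI_PENDING (1u << 2)   /* mirrors _EOI_pending above */

struct dev { _Atomic unsigned flags; };

static void send_eoi(struct dev *d, unsigned flag)
{
    (void)d; (void)flag;        /* the real code calls xen_irq_lateeoi() */
}

/* Safe to call from every completion path: only the caller that
 * actually clears the bit performs the EOI. */
static void lateeoi(struct dev *d, unsigned eoi_flag)
{
    if (atomic_fetch_and(&d->flags, ~EOI_PENDING) & EOI_PENDING)
        send_eoi(d, eoi_flag);
}
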

/* Handles shared IRQs that can go to device domain and control domain. */

@@ -296,26 +296,41 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
return 0;
}
#endif

static inline bool xen_pcibk_test_op_pending(struct xen_pcibk_device *pdev)
{
return test_bit(_XEN_PCIF_active,
(unsigned long *)&pdev->sh_info->flags) &&
!test_and_set_bit(_PDEVF_op_active, &pdev->flags);
}

/*
* Now the same evtchn is used for both pcifront conf_read_write request
* as well as pcie aer front end ack. We use a new work_queue to schedule
* xen_pcibk conf_read_write service for avoiding conflict with aer_core
* do_recovery job which also use the system default work_queue
*/
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
static void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
{
bool eoi = true;

/* Check that frontend is requesting an operation and that we are not
* already processing a request */
if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
&& !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
if (xen_pcibk_test_op_pending(pdev)) {
schedule_work(&pdev->op_work);
eoi = false;
}
/*_XEN_PCIB_active should have been cleared by pcifront. And also make
sure xen_pcibk is waiting for ack by checking _PCIB_op_pending*/
if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
&& test_bit(_PCIB_op_pending, &pdev->flags)) {
wake_up(&xen_pcibk_aer_wait_queue);
eoi = false;
}

/* EOI if there was nothing to do. */
if (eoi)
xen_pcibk_lateeoi(pdev, XEN_EOI_FLAG_SPURIOUS);
}

/* Performing the configuration space reads/writes must not be done in atomic
@@ -323,10 +338,8 @@ void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
* use of semaphores). This function is intended to be called from a work
* queue in process context taking a struct xen_pcibk_device as a parameter */

void xen_pcibk_do_op(struct work_struct *data)
static void xen_pcibk_do_one_op(struct xen_pcibk_device *pdev)
{
struct xen_pcibk_device *pdev =
container_of(data, struct xen_pcibk_device, op_work);
struct pci_dev *dev;
struct xen_pcibk_dev_data *dev_data = NULL;
struct xen_pci_op *op = &pdev->op;
@@ -399,16 +412,31 @@ void xen_pcibk_do_op(struct work_struct *data)
smp_mb__before_atomic(); /* /after/ clearing PCIF_active */
clear_bit(_PDEVF_op_active, &pdev->flags);
smp_mb__after_atomic(); /* /before/ final check for work */
}

/* Check to see if the driver domain tried to start another request in
* between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
*/
xen_pcibk_test_and_schedule_op(pdev);
void xen_pcibk_do_op(struct work_struct *data)
{
struct xen_pcibk_device *pdev =
container_of(data, struct xen_pcibk_device, op_work);

do {
xen_pcibk_do_one_op(pdev);
} while (xen_pcibk_test_op_pending(pdev));

xen_pcibk_lateeoi(pdev, 0);
}
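
The reworked worker replaces the old tail call to xen_pcibk_test_and_schedule_op() with a drain loop: it keeps processing as long as the frontend posted more work, and only then sends one EOI, so a request racing with completion can no longer be lost. The same shape in a hedged stand-alone sketch; the stub functions are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>

struct backend {
    _Atomic bool frontend_has_work;  /* set by the IRQ path */
};

static void process_one(struct backend *b) { (void)b; /* one request */ }
static void eoi(struct backend *b)         { (void)b; /* ack channel */ }

/* Keep consuming requests that arrived while we were busy, then EOI
 * once; the re-check after each pass closes the race window. */
static void worker(struct backend *b)
{
    do {
        process_one(b);
    } while (atomic_exchange(&b->frontend_has_work, false));
    eoi(b);
}
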

irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
{
struct xen_pcibk_device *pdev = dev_id;
bool eoi;

/* IRQs might come in before pdev->evtchn_irq is written. */
if (unlikely(pdev->evtchn_irq != irq))
pdev->evtchn_irq = irq;

eoi = test_and_set_bit(_EOI_pending, &pdev->flags);
WARN(eoi, "IRQ while EOI pending\n");

xen_pcibk_test_and_schedule_op(pdev);


@@ -122,7 +122,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,

pdev->sh_info = vaddr;

err = bind_interdomain_evtchn_to_irqhandler(
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
0, DRV_NAME, pdev);
if (err < 0) {

@@ -91,7 +91,6 @@ struct vscsibk_info {
unsigned int irq;

struct vscsiif_back_ring ring;
int ring_error;

spinlock_t ring_lock;
atomic_t nr_unreplied_reqs;
@@ -723,7 +722,8 @@ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
return pending_req;
}

static int scsiback_do_cmd_fn(struct vscsibk_info *info)
static int scsiback_do_cmd_fn(struct vscsibk_info *info,
unsigned int *eoi_flags)
{
struct vscsiif_back_ring *ring = &info->ring;
struct vscsiif_request ring_req;
@@ -740,11 +740,12 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
rc = ring->rsp_prod_pvt;
pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
info->domid, rp, rc, rp - rc);
info->ring_error = 1;
return 0;
return -EINVAL;
}

while ((rc != rp)) {
*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
break;

@@ -803,13 +804,16 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
{
struct vscsibk_info *info = dev_id;
int rc;
unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

if (info->ring_error)
return IRQ_HANDLED;

while (scsiback_do_cmd_fn(info))
while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
cond_resched();

/* In case of a ring error we keep the event channel masked. */
if (!rc)
xen_irq_lateeoi(irq, eoi_flags);

return IRQ_HANDLED;
}
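
scsiback's handler now follows the lateeoi contract: start from XEN_EOI_FLAG_SPURIOUS, clear the flag as soon as a genuine request is seen, and hand the result to xen_irq_lateeoi() so the core can throttle event storms that carry no work. A compact stand-alone model, with the ring accessors reduced to stubs:

#include <stdbool.h>

#define EOI_FLAG_SPURIOUS 0x1u   /* illustrative value */

static bool pop_request(void)        { return false; /* ring empty */ }
static void ack_event(unsigned flags) { (void)flags;  /* late EOI */ }

static void irq_handler(void)
{
    unsigned eoi_flags = EOI_FLAG_SPURIOUS;

    /* Any real request proves the event was not spurious. */
    while (pop_request())
        eoi_flags &= ~EOI_FLAG_SPURIOUS;

    ack_event(eoi_flags);
}
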

@@ -830,7 +834,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
sring = (struct vscsiif_sring *)area;
BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);

err = bind_interdomain_evtchn_to_irq(info->domid, evtchn);
err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn);
if (err < 0)
goto unmap_page;

@@ -1253,7 +1257,6 @@ static int scsiback_probe(struct xenbus_device *dev,

info->domid = dev->otherend_id;
spin_lock_init(&info->ring_lock);
info->ring_error = 0;
atomic_set(&info->nr_unreplied_reqs, 0);
init_waitqueue_head(&info->waiting_to_free);
info->dev = dev;

@@ -3874,6 +3874,10 @@ retry:
if (!ret) {
free_extent_buffer(eb);
continue;
} else if (ret < 0) {
done = 1;
free_extent_buffer(eb);
break;
}

ret = write_one_eb(eb, fs_info, wbc, &epd);

@@ -3854,6 +3854,8 @@ process_slot:
ret = -EINTR;
goto out;
}

cond_resched();
}
ret = 0;


@@ -493,7 +493,13 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
|
||||
else if (map_chars == SFM_MAP_UNI_RSVD) {
|
||||
bool end_of_string;
|
||||
|
||||
if (i == srclen - 1)
|
||||
/**
|
||||
* Remap spaces and periods found at the end of every
|
||||
* component of the path. The special cases of '.' and
|
||||
* '..' do not need to be dealt with explicitly because
|
||||
* they are addressed in namei.c:link_path_walk().
|
||||
**/
|
||||
if ((i == srclen - 1) || (source[i+1] == '\\'))
|
||||
end_of_string = true;
|
||||
else
|
||||
end_of_string = false;
|
||||
|
||||
@@ -1904,6 +1904,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
|
||||
|
||||
ext4_write_lock_xattr(inode, &no_expand);
|
||||
if (!ext4_has_inline_data(inode)) {
|
||||
ext4_write_unlock_xattr(inode, &no_expand);
|
||||
*has_inline = 0;
|
||||
ext4_journal_stop(handle);
|
||||
return;
|
||||
|
||||
@@ -1575,8 +1575,8 @@ static const struct mount_opts {
|
||||
{Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
|
||||
EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
|
||||
MOPT_CLEAR | MOPT_Q},
|
||||
{Opt_usrjquota, 0, MOPT_Q},
|
||||
{Opt_grpjquota, 0, MOPT_Q},
|
||||
{Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
|
||||
{Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
|
||||
{Opt_offusrjquota, 0, MOPT_Q},
|
||||
{Opt_offgrpjquota, 0, MOPT_Q},
|
||||
{Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
|
||||
@@ -4337,6 +4337,7 @@ cantfind_ext4:
|
||||
#ifdef CONFIG_QUOTA
|
||||
failed_mount8:
|
||||
ext4_unregister_sysfs(sb);
|
||||
kobject_put(&sbi->s_kobj);
|
||||
#endif
|
||||
failed_mount7:
|
||||
ext4_unregister_li_request(sb);
|
||||
|
||||
@@ -758,7 +758,8 @@ again:
|
||||
}
|
||||
kfree(gl->gl_lksb.sb_lvbptr);
|
||||
kmem_cache_free(cachep, gl);
|
||||
atomic_dec(&sdp->sd_glock_disposal);
|
||||
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
|
||||
wake_up(&sdp->sd_glock_wait);
|
||||
*glp = tmp;
|
||||
|
||||
return ret;
|
||||
|
||||
@@ -730,9 +730,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
|
||||
}
|
||||
|
||||
gfs2_free_clones(rgd);
|
||||
return_all_reservations(rgd);
|
||||
kfree(rgd->rd_bits);
|
||||
rgd->rd_bits = NULL;
|
||||
return_all_reservations(rgd);
|
||||
kmem_cache_free(gfs2_rgrpd_cachep, rgd);
|
||||
}
|
||||
}
|
||||
@@ -1371,6 +1371,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
|
||||
return -EROFS;
|
||||
|
||||
if (!blk_queue_discard(q))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
|
||||
@@ -1733,6 +1733,7 @@ static void ocfs2_inode_init_once(void *data)
|
||||
|
||||
oi->ip_blkno = 0ULL;
|
||||
oi->ip_clusters = 0;
|
||||
oi->ip_next_orphan = NULL;
|
||||
|
||||
ocfs2_resv_init_once(&oi->ip_la_data_resv);
|
||||
|
||||
|
||||
@@ -1318,7 +1318,7 @@ xfs_rmap_convert_shared(
|
||||
* record for our insertion point. This will also give us the record for
|
||||
* start block contiguity tests.
|
||||
*/
|
||||
error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
|
||||
error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
|
||||
&PREV, &i);
|
||||
XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
|
||||
|
||||
|
||||
@@ -262,8 +262,8 @@ xfs_rmapbt_key_diff(
|
||||
else if (y > x)
|
||||
return -1;
|
||||
|
||||
x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
|
||||
y = rec->rm_offset;
|
||||
x = be64_to_cpu(kp->rm_offset);
|
||||
y = xfs_rmap_irec_offset_pack(rec);
|
||||
if (x > y)
|
||||
return 1;
|
||||
else if (y > x)
|
||||
@@ -294,8 +294,8 @@ xfs_rmapbt_diff_two_keys(
|
||||
else if (y > x)
|
||||
return -1;
|
||||
|
||||
x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
|
||||
y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
|
||||
x = be64_to_cpu(kp1->rm_offset);
|
||||
y = be64_to_cpu(kp2->rm_offset);
|
||||
if (x > y)
|
||||
return 1;
|
||||
else if (y > x)
|
||||
@@ -401,8 +401,8 @@ xfs_rmapbt_keys_inorder(
|
||||
return 1;
|
||||
else if (a > b)
|
||||
return 0;
|
||||
a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
|
||||
b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
|
||||
a = be64_to_cpu(k1->rmap.rm_offset);
|
||||
b = be64_to_cpu(k2->rmap.rm_offset);
|
||||
if (a <= b)
|
||||
return 1;
|
||||
return 0;
|
||||
@@ -431,8 +431,8 @@ xfs_rmapbt_recs_inorder(
|
||||
return 1;
|
||||
else if (a > b)
|
||||
return 0;
|
||||
a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
|
||||
b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
|
||||
a = be64_to_cpu(r1->rmap.rm_offset);
|
||||
b = be64_to_cpu(r2->rmap.rm_offset);
|
||||
if (a <= b)
|
||||
return 1;
|
||||
return 0;
|
||||
|
||||
@@ -864,6 +864,16 @@ xfs_setattr_size(
|
||||
if (newsize > oldsize) {
|
||||
error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
|
||||
} else {
|
||||
/*
|
||||
* iomap won't detect a dirty page over an unwritten block (or a
|
||||
* cow block over a hole) and subsequently skips zeroing the
|
||||
* newly post-EOF portion of the page. Flush the new EOF to
|
||||
* convert the block before the pagecache truncate.
|
||||
*/
|
||||
error = filemap_write_and_wait_range(inode->i_mapping, newsize,
|
||||
newsize);
|
||||
if (error)
|
||||
return error;
|
||||
error = iomap_truncate_page(inode, newsize, &did_zeroing,
|
||||
&xfs_iomap_ops);
|
||||
}
|
||||
|
||||
@@ -144,7 +144,7 @@ xfs_fs_map_blocks(
|
||||
goto out_unlock;
|
||||
error = invalidate_inode_pages2(inode->i_mapping);
|
||||
if (WARN_ON_ONCE(error))
|
||||
return error;
|
||||
goto out_unlock;
|
||||
|
||||
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
|
||||
offset_fsb = XFS_B_TO_FSBT(mp, offset);
|
||||
|
@@ -60,21 +60,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
*/
static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
{
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
struct sk_buff *nskb;

if (likely(nskb)) {
can_skb_set_owner(nskb, skb->sk);
consume_skb(skb);
return nskb;
} else {
kfree_skb(skb);
return NULL;
}
nskb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!nskb)) {
kfree_skb(skb);
return NULL;
}

/* we can assume to have an unshared skb with proper owner */
return skb;
can_skb_set_owner(nskb, skb->sk);
consume_skb(skb);
return nskb;
}

#endif /* !_CAN_SKB_H */

@@ -475,7 +475,7 @@ struct pmu {
*/
struct perf_addr_filter {
struct list_head entry;
struct inode *inode;
struct path path;
unsigned long offset;
unsigned long size;
unsigned int range : 1,

@@ -16,12 +16,44 @@ void prandom_bytes(void *buf, size_t nbytes);
void prandom_seed(u32 seed);
void prandom_reseed_late(void);

#if BITS_PER_LONG == 64
/*
* The core SipHash round function. Each line can be executed in
* parallel given enough CPU resources.
*/
#define PRND_SIPROUND(v0, v1, v2, v3) ( \
v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
)

#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)

#elif BITS_PER_LONG == 32
/*
* On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
* This is weaker, but 32-bit machines are not used for high-traffic
* applications, so there is less output for an attacker to analyze.
*/
#define PRND_SIPROUND(v0, v1, v2, v3) ( \
v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
)
#define PRND_K0 0x6c796765
#define PRND_K1 0x74656462

#else
#error Unsupported BITS_PER_LONG
#endif

struct rnd_state {
__u32 s1, s2, s3, s4;
};

DECLARE_PER_CPU(struct rnd_state, net_rand_state);

u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);

@@ -188,6 +188,10 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts)
*/
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
/* Prevent multiplication overflow */
if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
return KTIME_MAX;

return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}


@@ -12,11 +12,16 @@

unsigned xen_evtchn_nr_channels(void);

int bind_evtchn_to_irq(unsigned int evtchn);
int bind_evtchn_to_irqhandler(unsigned int evtchn,
int bind_evtchn_to_irq(evtchn_port_t evtchn);
int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
void *dev_id);
int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
void *dev_id);
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
@@ -29,13 +34,21 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
const char *devname,
void *dev_id);
int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
unsigned int remote_port);
evtchn_port_t remote_port);
int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
evtchn_port_t remote_port);
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
unsigned int remote_port,
evtchn_port_t remote_port,
irq_handler_t handler,
unsigned long irqflags,
const char *devname,
void *dev_id);
int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
evtchn_port_t remote_port,
irq_handler_t handler,
unsigned long irqflags,
const char *devname,
void *dev_id);

/*
* Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -44,6 +57,14 @@ int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
*/
void unbind_from_irqhandler(unsigned int irq, void *dev_id);

/*
* Send late EOI for an IRQ bound to an event channel via one of the *_lateeoi
* functions above.
*/
void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags);
/* Signal an event was spurious, i.e. there was no action resulting from it. */
#define XEN_EOI_FLAG_SPURIOUS 0x00000001

#define XEN_IRQ_PRIORITY_MAX EVTCHN_FIFO_PRIORITY_MAX
#define XEN_IRQ_PRIORITY_DEFAULT EVTCHN_FIFO_PRIORITY_DEFAULT
#define XEN_IRQ_PRIORITY_MIN EVTCHN_FIFO_PRIORITY_MIN

@@ -5074,11 +5074,11 @@ static void perf_pmu_output_stop(struct perf_event *event);
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;

struct ring_buffer *rb = ring_buffer_get(event);
struct user_struct *mmap_user = rb->mmap_user;
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
bool detach_rest = false;

if (event->pmu->event_unmapped)
event->pmu->event_unmapped(event);
@@ -5109,7 +5109,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
mutex_unlock(&event->mmap_mutex);
}

atomic_dec(&rb->mmap_count);
if (atomic_dec_and_test(&rb->mmap_count))
detach_rest = true;

if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
goto out_put;
@@ -5118,7 +5119,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
mutex_unlock(&event->mmap_mutex);

/* If there's still other mmap()s of this buffer, we're done. */
if (atomic_read(&rb->mmap_count))
if (!detach_rest)
goto out_put;

/*
@@ -6276,7 +6277,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)

raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
if (filter->inode) {
if (filter->path.dentry) {
event->addr_filters_offs[count] = 0;
restart++;
}
@@ -6819,7 +6820,11 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
struct file *file, unsigned long offset,
unsigned long size)
{
if (filter->inode != file->f_inode)
/* d_inode(NULL) won't be equal to any mapped user-space file */
if (!filter->path.dentry)
return false;

if (d_inode(filter->path.dentry) != file_inode(file))
return false;

if (filter->offset > offset + size)
@@ -8023,8 +8028,7 @@ static void free_filters_list(struct list_head *filters)
struct perf_addr_filter *filter, *iter;

list_for_each_entry_safe(filter, iter, filters, entry) {
if (filter->inode)
iput(filter->inode);
path_put(&filter->path);
list_del(&filter->entry);
kfree(filter);
}
@@ -8118,7 +8122,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
* Adjust base offset if the filter is associated to a binary
* that needs to be mapped:
*/
if (filter->inode)
if (filter->path.dentry)
event->addr_filters_offs[count] =
perf_addr_filter_apply(filter, mm);

@@ -8191,7 +8195,6 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
{
struct perf_addr_filter *filter = NULL;
char *start, *orig, *filename = NULL;
struct path path;
substring_t args[MAX_OPT_ARGS];
int state = IF_STATE_ACTION, token;
unsigned int kernel = 0;
@@ -8254,6 +8257,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
int fpos = filter->range ? 2 : 1;

kfree(filename);
filename = match_strdup(&args[fpos]);
if (!filename) {
ret = -ENOMEM;
@@ -8282,19 +8286,15 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
goto fail;

/* look up the path and grab its inode */
ret = kern_path(filename, LOOKUP_FOLLOW, &path);
ret = kern_path(filename, LOOKUP_FOLLOW,
&filter->path);
if (ret)
goto fail_free_name;

filter->inode = igrab(d_inode(path.dentry));
path_put(&path);
kfree(filename);
filename = NULL;
goto fail;

ret = -EINVAL;
if (!filter->inode ||
!S_ISREG(filter->inode->i_mode))
/* free_filters_list() will iput() */
if (!filter->path.dentry ||
!S_ISREG(d_inode(filter->path.dentry)
->i_mode))
goto fail;
}

@@ -8307,13 +8307,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
if (state != IF_STATE_ACTION)
goto fail;

kfree(filename);
kfree(orig);

return 0;

fail_free_name:
kfree(filename);
fail:
kfree(filename);
free_filters_list(filters);
kfree(orig);


@@ -212,7 +212,7 @@ static inline int get_recursion_context(int *recursion)
rctx = 3;
else if (in_irq())
rctx = 2;
else if (in_softirq())
else if (in_serving_softirq())
rctx = 1;
else
rctx = 0;

@@ -485,7 +485,10 @@ static void exit_mm(struct task_struct *tsk)
up_read(&mm->mmap_sem);

self.task = tsk;
self.next = xchg(&core_state->dumper.next, &self);
if (self.task->flags & PF_SIGNALED)
self.next = xchg(&core_state->dumper.next, &self);
else
self.task = NULL;
/*
* Implies mb(), the result of xchg() must be visible
* to core_state->dumper.

@@ -67,6 +67,7 @@ config IRQ_DOMAIN_HIERARCHY
# Generic IRQ IPI support
config GENERIC_IRQ_IPI
bool
select IRQ_DOMAIN_HIERARCHY

# Generic MSI interrupt support
config GENERIC_MSI_IRQ

@@ -512,22 +512,22 @@ static int __init reboot_setup(char *str)
break;

case 's':
{
int rc;

if (isdigit(*(str+1))) {
rc = kstrtoint(str+1, 0, &reboot_cpu);
if (rc)
return rc;
} else if (str[1] == 'm' && str[2] == 'p' &&
isdigit(*(str+3))) {
rc = kstrtoint(str+3, 0, &reboot_cpu);
if (rc)
return rc;
} else
if (isdigit(*(str+1)))
reboot_cpu = simple_strtoul(str+1, NULL, 0);
else if (str[1] == 'm' && str[2] == 'p' &&
isdigit(*(str+3)))
reboot_cpu = simple_strtoul(str+3, NULL, 0);
else
reboot_mode = REBOOT_SOFT;
if (reboot_cpu >= num_possible_cpus()) {
pr_err("Ignoring the CPU number in reboot= option. "
"CPU %d exceeds possible cpu number %d\n",
reboot_cpu, num_possible_cpus());
reboot_cpu = 0;
break;
}
break;
}

case 'g':
reboot_mode = REBOOT_GPIO;
break;

@@ -1636,13 +1636,6 @@ void update_process_times(int user_tick)
#endif
scheduler_tick();
run_posix_cpu_timers(p);

/* The current CPU might make use of net randoms without receiving IRQs
* to renew them often enough. Let's update the net_rand_state from a
* non-constant value that's not affine to the number of calls to make
* sure it's updated when there's some activity (we don't care in idle).
*/
this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
}

/**

@@ -416,14 +416,16 @@ struct rb_event_info {

/*
* Used for which event context the event is in.
* NMI = 0
* IRQ = 1
* SOFTIRQ = 2
* NORMAL = 3
* TRANSITION = 0
* NMI = 1
* IRQ = 2
* SOFTIRQ = 3
* NORMAL = 4
*
* See trace_recursive_lock() comment below for more details.
*/
enum {
RB_CTX_TRANSITION,
RB_CTX_NMI,
RB_CTX_IRQ,
RB_CTX_SOFTIRQ,
@@ -2579,10 +2581,10 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* a bit of overhead in something as critical as function tracing,
* we use a bitmask trick.
*
* bit 0 = NMI context
* bit 1 = IRQ context
* bit 2 = SoftIRQ context
* bit 3 = normal context.
* bit 1 = NMI context
* bit 2 = IRQ context
* bit 3 = SoftIRQ context
* bit 4 = normal context.
*
* This works because this is the order of contexts that can
* preempt other contexts. A SoftIRQ never preempts an IRQ
@@ -2605,6 +2607,30 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* The least significant bit can be cleared this way, and it
* just so happens that it is the same bit corresponding to
* the current context.
*
* Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
* is set when a recursion is detected at the current context, and if
* the TRANSITION bit is already set, it will fail the recursion.
* This is needed because there's a lag between the changing of
* interrupt context and updating the preempt count. In this case,
* a false positive will be found. To handle this, one extra recursion
* is allowed, and this is done by the TRANSITION bit. If the TRANSITION
* bit is already set, then it is considered a recursion and the function
* ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
*
* On the trace_recursive_unlock(), the TRANSITION bit will be the first
* to be cleared. Even if it wasn't the context that set it. That is,
* if an interrupt comes in while NORMAL bit is set and the ring buffer
* is called before preempt_count() is updated, since the check will
* be on the NORMAL bit, the TRANSITION bit will then be set. If an
* NMI then comes in, it will set the NMI bit, but when the NMI code
* does the trace_recursive_unlock() it will clear the TRANSITION bit
* and leave the NMI bit set. But this is fine, because the interrupt
* code that set the TRANSITION bit will then clear the NMI bit when it
* calls trace_recursive_unlock(). If another NMI comes in, it will
* set the TRANSITION bit and continue.
*
* Note: The TRANSITION bit only handles a single transition between context.
*/

static __always_inline int
@@ -2623,8 +2649,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
} else
bit = RB_CTX_NORMAL;

if (unlikely(val & (1 << bit)))
return 1;
if (unlikely(val & (1 << bit))) {
/*
* It is possible that this was called by transitioning
* between interrupt context, and preempt_count() has not
* been updated yet. In this case, use the TRANSITION bit.
*/
bit = RB_CTX_TRANSITION;
if (val & (1 << bit))
return 1;
}

val |= (1 << bit);
cpu_buffer->current_context = val;
||||
|
||||
462
lib/random32.c
462
lib/random32.c
@@ -39,16 +39,6 @@
|
||||
#include <linux/sched.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#ifdef CONFIG_RANDOM32_SELFTEST
|
||||
static void __init prandom_state_selftest(void);
|
||||
#else
|
||||
static inline void prandom_state_selftest(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
|
||||
|
||||
/**
|
||||
* prandom_u32_state - seeded pseudo-random number generator.
|
||||
* @state: pointer to state structure holding seeded state.
|
||||
@@ -68,25 +58,6 @@ u32 prandom_u32_state(struct rnd_state *state)
|
||||
}
|
||||
EXPORT_SYMBOL(prandom_u32_state);
|
||||
|
||||
/**
|
||||
* prandom_u32 - pseudo random number generator
|
||||
*
|
||||
* A 32 bit pseudo-random number is generated using a fast
|
||||
* algorithm suitable for simulation. This algorithm is NOT
|
||||
* considered safe for cryptographic use.
|
||||
*/
|
||||
u32 prandom_u32(void)
|
||||
{
|
||||
struct rnd_state *state = &get_cpu_var(net_rand_state);
|
||||
u32 res;
|
||||
|
||||
res = prandom_u32_state(state);
|
||||
put_cpu_var(net_rand_state);
|
||||
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(prandom_u32);
|
||||
|
||||
/**
|
||||
* prandom_bytes_state - get the requested number of pseudo-random bytes
|
||||
*
|
||||
@@ -118,20 +89,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
|
||||
}
|
||||
EXPORT_SYMBOL(prandom_bytes_state);
|
||||
|
||||
/**
|
||||
* prandom_bytes - get the requested number of pseudo-random bytes
|
||||
* @buf: where to copy the pseudo-random bytes to
|
||||
* @bytes: the requested number of bytes
|
||||
*/
|
||||
void prandom_bytes(void *buf, size_t bytes)
|
||||
{
|
||||
struct rnd_state *state = &get_cpu_var(net_rand_state);
|
||||
|
||||
prandom_bytes_state(state, buf, bytes);
|
||||
put_cpu_var(net_rand_state);
|
||||
}
|
||||
EXPORT_SYMBOL(prandom_bytes);
|
||||
|
||||
static void prandom_warmup(struct rnd_state *state)
|
||||
{
|
||||
/* Calling RNG ten times to satisfy recurrence condition */
|
||||
@@ -147,96 +104,6 @@ static void prandom_warmup(struct rnd_state *state)
|
||||
prandom_u32_state(state);
|
||||
}
|
||||
|
||||
static u32 __extract_hwseed(void)
|
||||
{
|
||||
unsigned int val = 0;
|
||||
|
||||
(void)(arch_get_random_seed_int(&val) ||
|
||||
arch_get_random_int(&val));
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static void prandom_seed_early(struct rnd_state *state, u32 seed,
|
||||
bool mix_with_hwseed)
|
||||
{
|
||||
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
|
||||
#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
|
||||
state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
|
||||
state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
|
||||
state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
|
||||
state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
|
||||
}
|
||||
|
||||
/**
|
||||
* prandom_seed - add entropy to pseudo random number generator
|
||||
* @seed: seed value
|
||||
*
|
||||
* Add some additional seeding to the prandom pool.
|
||||
*/
|
||||
void prandom_seed(u32 entropy)
|
||||
{
|
||||
int i;
|
||||
/*
|
||||
* No locking on the CPUs, but then somewhat random results are, well,
|
||||
* expected.
|
||||
*/
|
||||
for_each_possible_cpu(i) {
|
||||
struct rnd_state *state = &per_cpu(net_rand_state, i);
|
||||
|
||||
state->s1 = __seed(state->s1 ^ entropy, 2U);
|
||||
prandom_warmup(state);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(prandom_seed);
|
||||
|
||||
/*
|
||||
* Generate some initially weak seeding values to allow
|
||||
* to start the prandom_u32() engine.
|
||||
*/
|
||||
static int __init prandom_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
prandom_state_selftest();
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
struct rnd_state *state = &per_cpu(net_rand_state, i);
|
||||
u32 weak_seed = (i + jiffies) ^ random_get_entropy();
|
||||
|
||||
prandom_seed_early(state, weak_seed, true);
|
||||
prandom_warmup(state);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
core_initcall(prandom_init);
|
||||
|
||||
static void __prandom_timer(unsigned long dontcare);
|
||||
|
||||
static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
|
||||
|
||||
static void __prandom_timer(unsigned long dontcare)
|
||||
{
|
||||
u32 entropy;
|
||||
unsigned long expires;
|
||||
|
||||
get_random_bytes(&entropy, sizeof(entropy));
|
||||
prandom_seed(entropy);
|
||||
|
||||
/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
|
||||
expires = 40 + prandom_u32_max(40);
|
||||
seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
|
||||
|
||||
add_timer(&seed_timer);
|
||||
}
|
||||
|
||||
static void __init __prandom_start_seed_timer(void)
|
||||
{
|
||||
seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
|
||||
add_timer(&seed_timer);
|
||||
}
|
||||
|
||||
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
|
||||
{
|
||||
int i;
|
||||
@@ -256,51 +123,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
|
||||
}
|
||||
EXPORT_SYMBOL(prandom_seed_full_state);
|
||||
|
||||
/*
|
||||
* Generate better values after random number generator
|
||||
* is fully initialized.
|
||||
*/
|
||||
static void __prandom_reseed(bool late)
|
||||
{
|
||||
unsigned long flags;
|
||||
static bool latch = false;
|
||||
static DEFINE_SPINLOCK(lock);
|
||||
|
||||
/* Asking for random bytes might result in bytes getting
|
||||
* moved into the nonblocking pool and thus marking it
|
||||
* as initialized. In this case we would double back into
|
||||
* this function and attempt to do a late reseed.
|
||||
* Ignore the pointless attempt to reseed again if we're
|
||||
* already waiting for bytes when the nonblocking pool
|
||||
* got initialized.
|
||||
*/
|
||||
|
||||
/* only allow initial seeding (late == false) once */
|
||||
if (!spin_trylock_irqsave(&lock, flags))
|
||||
return;
|
||||
|
||||
if (latch && !late)
|
||||
goto out;
|
||||
|
||||
latch = true;
|
||||
prandom_seed_full_state(&net_rand_state);
|
||||
out:
|
||||
spin_unlock_irqrestore(&lock, flags);
|
||||
}
|
||||
|
||||
void prandom_reseed_late(void)
|
||||
{
|
||||
__prandom_reseed(true);
|
||||
}
|
||||
|
||||
static int __init prandom_reseed(void)
|
||||
{
|
||||
__prandom_reseed(false);
|
||||
__prandom_start_seed_timer();
|
||||
return 0;
|
||||
}
|
||||
late_initcall(prandom_reseed);
|
||||
|
||||
#ifdef CONFIG_RANDOM32_SELFTEST
|
||||
static struct prandom_test1 {
|
||||
u32 seed;
|
||||
@@ -420,7 +242,28 @@ static struct prandom_test2 {
|
||||
{ 407983964U, 921U, 728767059U },
|
||||
};
|
||||
|
||||
static void __init prandom_state_selftest(void)
|
||||
static u32 __extract_hwseed(void)
|
||||
{
|
||||
unsigned int val = 0;
|
||||
|
||||
(void)(arch_get_random_seed_int(&val) ||
|
||||
arch_get_random_int(&val));
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static void prandom_seed_early(struct rnd_state *state, u32 seed,
|
||||
bool mix_with_hwseed)
|
||||
{
|
||||
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
|
||||
#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
|
||||
state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
|
||||
state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
|
||||
state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
|
||||
state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
|
||||
}
|
||||
|
||||
static int __init prandom_state_selftest(void)
|
||||
{
|
||||
int i, j, errors = 0, runs = 0;
|
||||
bool error = false;
|
||||
@@ -460,5 +303,266 @@ static void __init prandom_state_selftest(void)
|
||||
pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
|
||||
else
|
||||
pr_info("prandom: %d self tests passed\n", runs);
|
||||
return 0;
|
||||
}
|
||||
core_initcall(prandom_state_selftest);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The prandom_u32() implementation is now completely separate from the
|
||||
* prandom_state() functions, which are retained (for now) for compatibility.
|
||||
*
|
||||
* Because of (ab)use in the networking code for choosing random TCP/UDP port
|
||||
* numbers, which open DoS possibilities if guessable, we want something
|
||||
* stronger than a standard PRNG. But the performance requirements of
|
||||
* the network code do not allow robust crypto for this application.
|
||||
*
|
||||
* So this is a homebrew Junior Spaceman implementation, based on the
|
||||
* lowest-latency trustworthy crypto primitive available, SipHash.
|
||||
* (The authors of SipHash have not been consulted about this abuse of
|
||||
* their work.)
|
||||
*
|
||||
* Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
|
||||
* one word of output. This abbreviated version uses 2 rounds per word
|
||||
* of output.
|
||||
*/
|
||||
|
||||
struct siprand_state {
|
||||
unsigned long v0;
|
||||
unsigned long v1;
|
||||
unsigned long v2;
|
||||
unsigned long v3;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
|
||||
|
||||
/*
|
||||
* This is the core CPRNG function. As "pseudorandom", this is not used
|
||||
* for truly valuable things, just intended to be a PITA to guess.
|
||||
* For maximum speed, we do just two SipHash rounds per word. This is
|
||||
* the same rate as 4 rounds per 64 bits that SipHash normally uses,
|
||||
* so hopefully it's reasonably secure.
|
||||
*
|
||||
* There are two changes from the official SipHash finalization:
|
||||
* - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
|
||||
* they are there only to make the output rounds distinct from the input
|
||||
* rounds, and this application has no input rounds.
|
||||
* - Rather than returning v0^v1^v2^v3, return v1+v3.
|
||||
* If you look at the SipHash round, the last operation on v3 is
|
||||
* "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
|
||||
* Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
|
||||
* it still cancels out half of the bits in v2 for no benefit.)
|
||||
* Second, since the last combining operation was xor, continue the
|
||||
* pattern of alternating xor/add for a tiny bit of extra non-linearity.
|
||||
*/
|
||||
static inline u32 siprand_u32(struct siprand_state *s)
|
||||
{
|
||||
unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
|
||||
|
||||
PRND_SIPROUND(v0, v1, v2, v3);
|
||||
PRND_SIPROUND(v0, v1, v2, v3);
|
||||
s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
|
||||
return v1 + v3;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* prandom_u32 - pseudo random number generator
|
||||
*
|
||||
* A 32 bit pseudo-random number is generated using a fast
|
||||
* algorithm suitable for simulation. This algorithm is NOT
|
||||
* considered safe for cryptographic use.
|
||||
*/
|
||||
u32 prandom_u32(void)
|
||||
{
|
||||
struct siprand_state *state = get_cpu_ptr(&net_rand_state);
|
||||
u32 res = siprand_u32(state);
|
||||
|
||||
put_cpu_ptr(&net_rand_state);
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(prandom_u32);
|
||||
|
||||
/**
|
||||
* prandom_bytes - get the requested number of pseudo-random bytes
|
||||
* @buf: where to copy the pseudo-random bytes to
|
||||
* @bytes: the requested number of bytes
|
||||
*/
|
||||
void prandom_bytes(void *buf, size_t bytes)
|
||||
{
|
||||
struct siprand_state *state = get_cpu_ptr(&net_rand_state);
|
||||
u8 *ptr = buf;
|
||||
|
||||
while (bytes >= sizeof(u32)) {
|
||||
put_unaligned(siprand_u32(state), (u32 *)ptr);
|
||||
ptr += sizeof(u32);
|
||||
bytes -= sizeof(u32);
|
||||
}
|
||||
|
||||
if (bytes > 0) {
|
||||
u32 rem = siprand_u32(state);
|
||||
|
||||
do {
|
||||
*ptr++ = (u8)rem;
|
||||
rem >>= BITS_PER_BYTE;
|
||||
} while (--bytes > 0);
|
||||
}
|
||||
put_cpu_ptr(&net_rand_state);
|
||||
}
|
||||
EXPORT_SYMBOL(prandom_bytes);
|
||||
|
||||
/**
|
||||
* prandom_seed - add entropy to pseudo random number generator
|
||||
* @entropy: entropy value
|
||||
*
|
||||
* Add some additional seed material to the prandom pool.
|
||||
* The "entropy" is actually our IP address (the only caller is
|
||||
* the network code), not for unpredictability, but to ensure that
|
||||
* different machines are initialized differently.
|
||||
*/
|
||||
void prandom_seed(u32 entropy)
|
||||
{
|
||||
int i;
|
||||
|
||||
add_device_randomness(&entropy, sizeof(entropy));
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
|
||||
unsigned long v0 = state->v0, v1 = state->v1;
|
||||
unsigned long v2 = state->v2, v3 = state->v3;
|
||||
|
||||
do {
|
||||
v3 ^= entropy;
|
||||
PRND_SIPROUND(v0, v1, v2, v3);
|
||||
PRND_SIPROUND(v0, v1, v2, v3);
|
||||
v0 ^= entropy;
|
||||
} while (unlikely(!v0 || !v1 || !v2 || !v3));
|
||||
|
||||
WRITE_ONCE(state->v0, v0);
|
||||
WRITE_ONCE(state->v1, v1);
|
||||
WRITE_ONCE(state->v2, v2);
|
||||
WRITE_ONCE(state->v3, v3);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(prandom_seed);
|
||||
|
||||
/*
|
||||
* Generate some initially weak seeding values to allow
|
||||
* the prandom_u32() engine to be started.
|
||||
*/
|
||||
static int __init prandom_init_early(void)
|
||||
{
|
||||
int i;
|
||||
unsigned long v0, v1, v2, v3;
|
||||
|
||||
if (!arch_get_random_long(&v0))
|
||||
v0 = jiffies;
|
||||
if (!arch_get_random_long(&v1))
|
||||
v1 = random_get_entropy();
|
||||
v2 = v0 ^ PRND_K0;
|
||||
v3 = v1 ^ PRND_K1;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
struct siprand_state *state;
|
||||
|
||||
v3 ^= i;
|
||||
PRND_SIPROUND(v0, v1, v2, v3);
|
||||
PRND_SIPROUND(v0, v1, v2, v3);
|
||||
v0 ^= i;
|
||||
|
||||
state = per_cpu_ptr(&net_rand_state, i);
|
||||
state->v0 = v0; state->v1 = v1;
|
||||
state->v2 = v2; state->v3 = v3;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
core_initcall(prandom_init_early);
|
||||
|
||||
|
||||
/* Stronger reseeding when available, and periodically thereafter. */
static void prandom_reseed(unsigned long dontcare);

static DEFINE_TIMER(seed_timer, prandom_reseed, 0, 0);

static void prandom_reseed(unsigned long dontcare)
{
	unsigned long expires;
	int i;

	/*
	 * Reinitialize each CPU's PRNG with 128 bits of key.
	 * No locking on the CPUs, but then somewhat random results are,
	 * well, expected.
	 */
	for_each_possible_cpu(i) {
		struct siprand_state *state;
		unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
		unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
#if BITS_PER_LONG == 32
		int j;

		/*
		 * On 32-bit machines, hash in two extra words to
		 * approximate 128-bit key length. Not that the hash
		 * has that much security, but this prevents a trivial
		 * 64-bit brute force.
		 */
		for (j = 0; j < 2; j++) {
			unsigned long m = get_random_long();

			v3 ^= m;
			PRND_SIPROUND(v0, v1, v2, v3);
			PRND_SIPROUND(v0, v1, v2, v3);
			v0 ^= m;
		}
#endif
		/*
		 * Probably impossible in practice, but there is a
		 * theoretical risk that a race between this reseeding
		 * and the target CPU writing its state back could
		 * create the all-zero SipHash fixed point.
		 *
		 * To ensure that never happens, ensure the state
		 * we write contains no zero words.
		 */
		state = per_cpu_ptr(&net_rand_state, i);
		WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
		WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
		WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
		WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
	}

	/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
	expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
	mod_timer(&seed_timer, expires);
}

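The expiry math above schedules the next reseed uniformly in [40 s, 80 s) from now, and round_jiffies() then aligns it to a second boundary to batch timer wakeups. A quick userspace check of the interval bounds, with rand() standing in for prandom_u32_max() and an assumed tick rate:

#include <stdio.h>
#include <stdlib.h>

#define HZ 250	/* assumed tick rate, for illustration only */

int main(void)
{
	unsigned long min = -1ul, max = 0;
	int i;

	for (i = 0; i < 1000000; i++) {
		/* jiffies offset in [40*HZ, 80*HZ), as in prandom_reseed() */
		unsigned long d = 40UL * HZ + (unsigned long)(rand() % (40 * HZ));

		if (d < min) min = d;
		if (d > max) max = d;
	}
	printf("delay range: %.1fs .. %.1fs\n",
	       (double)min / HZ, (double)max / HZ);	/* ~40.0s .. ~80.0s */
	return 0;
}
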
/*
 * The random ready callback can be called from almost any interrupt.
 * To avoid worrying about whether it's safe to delay that interrupt
 * long enough to seed all CPUs, just schedule an immediate timer event.
 */
static void prandom_timer_start(struct random_ready_callback *unused)
{
	mod_timer(&seed_timer, jiffies);
}

/*
 * Start periodic full reseeding as soon as strong
 * random numbers are available.
 */
static int __init prandom_init_late(void)
{
	static struct random_ready_callback random_ready = {
		.func = prandom_timer_start
	};
	int ret = add_random_ready_callback(&random_ready);

	if (ret == -EALREADY) {
		prandom_timer_start(&random_ready);
		ret = 0;
	}
	return ret;
}
late_initcall(prandom_init_late);

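add_random_ready_callback() returns -EALREADY when the entropy pool became ready before registration, in which case the callback is simply run inline; either way the reseed timer gets armed exactly once. The register-or-run-now shape, as a generic hedged sketch (all types and names here are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

typedef void (*ready_cb)(void);

static bool source_ready;	/* stands in for the entropy pool state */
static ready_cb pending;

/* Returns 0 if queued, -1 ("already ready") if the caller must run it now. */
static int register_ready_callback(ready_cb cb)
{
	if (source_ready)
		return -1;
	pending = cb;
	return 0;
}

static void start_reseeding(void)
{
	printf("reseed timer armed\n");
}

int main(void)
{
	source_ready = true;	/* simulate the pool racing ahead of us */
	if (register_ready_callback(start_reseeding) < 0)
		start_reseeding();	/* the -EALREADY path */
	return 0;
}
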
@@ -199,6 +199,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
 	}
 	io_tlb_index = 0;
+	no_iotlb_memory = false;
 
 	if (verbose)
 		swiotlb_print_info();
@@ -229,9 +230,11 @@ swiotlb_init(int verbose)
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
 
-	if (io_tlb_start)
+	if (io_tlb_start) {
 		memblock_free_early(io_tlb_start,
 				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		io_tlb_start = 0;
+	}
 	pr_warn("Cannot allocate buffer");
 	no_iotlb_memory = true;
 }
@@ -330,6 +333,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
 	}
 	io_tlb_index = 0;
+	no_iotlb_memory = false;
 
 	swiotlb_print_info();

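This hunk, from lib/swiotlb.c, tightens the failure path: the partially reserved bounce buffer is returned to memblock and io_tlb_start is cleared, so later code cannot mistake the freed region for a valid table, while the successful init paths now reset no_iotlb_memory. The free-and-clear-on-error idiom in miniature (a hedged userspace sketch, names invented):

#include <stdio.h>
#include <stdlib.h>

static char *table;		/* stands in for io_tlb_start */
static int no_table_memory;	/* stands in for no_iotlb_memory */

static int table_init(size_t n)
{
	table = malloc(n);
	if (table && n < (size_t)1 << 20) {	/* pretend huge requests fail setup */
		no_table_memory = 0;		/* success: clear the failure flag */
		return 0;
	}
	if (table) {		/* setup failed after allocation: */
		free(table);	/* release the partial reservation */
		table = NULL;	/* and clear the stale pointer */
	}
	fprintf(stderr, "Cannot allocate buffer\n");
	no_table_memory = 1;
	return -1;
}

int main(void)
{
	table_init((size_t)1 << 22);	/* exercises the failure path */
	printf("table=%p no_table_memory=%d\n", (void *)table, no_table_memory);
	return 0;
}
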
@@ -487,7 +487,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
 	int nid, ret;
-	pte_t *pte;
+	pte_t *pte, *mapped_pte;
 	spinlock_t *ptl;
 
 	if (pmd_trans_huge(*pmd)) {
@@ -515,7 +515,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	if (pmd_trans_unstable(pmd))
 		return 0;
 retry:
-	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
@@ -554,7 +554,7 @@ retry:
 	} else
 		break;
 	}
-	pte_unmap_unlock(pte - 1, ptl);
+	pte_unmap_unlock(mapped_pte, ptl);
 	cond_resched();
 	return addr != end ? -EIO : 0;
 }

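The mm/mempolicy.c fix: the walk advances pte on every iteration, so after an early break `pte - 1` depends on where the loop stopped rather than on what pte_offset_map_lock() actually mapped, and the wrong address could be handed to pte_unmap_unlock(). Stashing the original pointer in mapped_pte makes the unmap exact. The pitfall in miniature (plain C, names invented):

#include <stdio.h>

int main(void)
{
	int pages[8] = { 1, 1, -1, 1, 1, 1, 1, 1 };	/* -1 triggers an early break */
	int *mapped = pages;	/* remember what we "mapped" */
	int *p = pages;
	int i;

	for (i = 0; i < 8; p++, i++) {
		if (*p < 0)
			break;	/* early exit mid-array */
	}
	/* Wrong: p - 1 depends on where the loop stopped. */
	printf("p - 1  -> pages[%td]\n", (p - 1) - pages);
	/* Right: release exactly what was acquired. */
	printf("mapped -> pages[%td]\n", mapped - pages);
	return 0;
}
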
@@ -304,7 +304,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	__u32 cookie = ntohl(th->ack_seq) - 1;
 	struct sock *ret = sk;
 	struct request_sock *req;
-	int mss;
+	int full_space, mss;
 	struct rtable *rt;
 	__u8 rcv_wscale;
 	struct flowi4 fl4;
@@ -388,8 +388,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 
 	/* Try to redo what tcp_v4_send_synack did. */
 	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
+	/* limit the window selection if the user enforce a smaller rx buffer */
+	full_space = tcp_full_space(sk);
+	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
+		req->rsk_window_clamp = full_space;
 
-	tcp_select_initial_window(sock_net(sk), tcp_full_space(sk), req->mss,
+	tcp_select_initial_window(sock_net(sk), full_space, req->mss,
 				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
 				  ireq->wscale_ok, &rcv_wscale,
 				  dst_metric(&rt->dst, RTAX_INITRWND));

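The syncookies change: when the application has locked its receive buffer with SO_RCVBUF, the window clamp recomputed for an accepted cookie must not exceed the buffer-derived full_space, and an unset clamp of 0 is raised to it; the IPv6 path further below gets the identical treatment. The clamping rule as a small hedged helper (standalone C, names invented):

#include <stdbool.h>
#include <stdio.h>

/* Clamp the announced window when the receive buffer is user-locked. */
static unsigned int clamp_window(unsigned int clamp, unsigned int full_space,
				 bool rcvbuf_locked)
{
	if (rcvbuf_locked && (clamp > full_space || clamp == 0))
		return full_space;
	return clamp;
}

int main(void)
{
	printf("%u\n", clamp_window(0, 65535, true));		/* unset   -> 65535 */
	printf("%u\n", clamp_window(1u << 20, 65535, true));	/* too big -> 65535 */
	printf("%u\n", clamp_window(32768, 65535, true));	/* fine    -> 32768 */
	return 0;
}
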
@@ -1072,7 +1072,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
 	if (tdev && !netif_is_l3_master(tdev)) {
 		int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
-		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
 		dev->mtu = tdev->mtu - t_hlen;
 		if (dev->mtu < IPV6_MIN_MTU)
 			dev->mtu = IPV6_MIN_MTU;
@@ -1372,7 +1371,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
 	dev->destructor = ipip6_dev_free;
 
 	dev->type = ARPHRD_SIT;
-	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
 	dev->mtu = ETH_DATA_LEN - t_hlen;
 	dev->flags = IFF_NOARP;
 	netif_keep_dst(dev);

@@ -143,7 +143,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	__u32 cookie = ntohl(th->ack_seq) - 1;
 	struct sock *ret = sk;
 	struct request_sock *req;
-	int mss;
+	int full_space, mss;
 	struct dst_entry *dst;
 	__u8 rcv_wscale;
 
@@ -237,7 +237,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	}
 
 	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
-	tcp_select_initial_window(sock_net(sk), tcp_full_space(sk), req->mss,
+	/* limit the window selection if the user enforce a smaller rx buffer */
+	full_space = tcp_full_space(sk);
+	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
+		req->rsk_window_clamp = full_space;
+
+	tcp_select_initial_window(sock_net(sk), full_space, req->mss,
 				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
 				  ireq->wscale_ok, &rcv_wscale,
 				  dst_metric(dst, RTAX_INITRWND));

@@ -1542,7 +1542,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
 		break;
 	}
 
-	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
+	if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
+	    sk->sk_state == IUCV_CONNECTED) {
 		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
 			txmsg.class = 0;
 			txmsg.tag = 0;

@@ -243,6 +243,24 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
  */
 void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
 {
+	/*
+	 * If we had used sta_info_pre_move_state() then we might not
+	 * have gone through the state transitions down again, so do
+	 * it here now (and warn if it's inserted).
+	 *
+	 * This will clear state such as fast TX/RX that may have been
+	 * allocated during state transitions.
+	 */
+	while (sta->sta_state > IEEE80211_STA_NONE) {
+		int ret;
+
+		WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));
+
+		ret = sta_info_move_state(sta, sta->sta_state - 1);
+		if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret))
+			break;
+	}
+
 	if (sta->rate_ctrl)
 		rate_control_free_sta(sta);

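The added loop in sta_info_free() walks the station's state machine down one step at a time before freeing, so per-state allocations such as fast TX/RX structures are torn down rather than leaked when a station never went through the normal removal path. The descend-one-step shape, sketched generically (enum values invented):

#include <stdio.h>

enum sta_state { STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

/* Each downward transition frees whatever that state had allocated. */
static int move_state_down(enum sta_state *cur)
{
	printf("tearing down state %d\n", (int)*cur);
	*cur = (enum sta_state)(*cur - 1);
	return 0;	/* a real handler could fail */
}

int main(void)
{
	enum sta_state state = STA_AUTHORIZED;

	while (state > STA_NONE) {
		if (move_state_down(&state))	/* on failure: stop, don't spin */
			break;
	}
	return 0;
}
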
@@ -1847,19 +1847,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
 
 /* device xmit handlers */
 
+enum ieee80211_encrypt {
+	ENCRYPT_NO,
+	ENCRYPT_MGMT,
+	ENCRYPT_DATA,
+};
+
 static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
 				struct sk_buff *skb,
-				int head_need, bool may_encrypt)
+				int head_need,
+				enum ieee80211_encrypt encrypt)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_hdr *hdr;
 	bool enc_tailroom;
 	int tail_need = 0;
 
 	hdr = (struct ieee80211_hdr *) skb->data;
-	enc_tailroom = may_encrypt &&
-		       (sdata->crypto_tx_tailroom_needed_cnt ||
-			ieee80211_is_mgmt(hdr->frame_control));
+	enc_tailroom = encrypt == ENCRYPT_MGMT ||
+		       (encrypt == ENCRYPT_DATA &&
+			sdata->crypto_tx_tailroom_needed_cnt);
 
 	if (enc_tailroom) {
 		tail_need = IEEE80211_ENCRYPT_TAILROOM;
@@ -1892,21 +1897,27 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	int headroom;
-	bool may_encrypt;
+	enum ieee80211_encrypt encrypt;
 
-	may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
+	if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
+		encrypt = ENCRYPT_NO;
+	else if (ieee80211_is_mgmt(hdr->frame_control))
+		encrypt = ENCRYPT_MGMT;
+	else
+		encrypt = ENCRYPT_DATA;
 
 	headroom = local->tx_headroom;
-	if (may_encrypt)
+	if (encrypt != ENCRYPT_NO)
 		headroom += sdata->encrypt_headroom;
 	headroom -= skb_headroom(skb);
 	headroom = max_t(int, 0, headroom);
 
-	if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
+	if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
 		ieee80211_free_txskb(&local->hw, skb);
 		return;
 	}
 
 	/* reload after potential resize */
 	hdr = (struct ieee80211_hdr *) skb->data;
 	info->control.vif = &sdata->vif;
 
@@ -2688,7 +2699,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
 	head_need += sdata->encrypt_headroom;
 	head_need += local->tx_headroom;
 	head_need = max_t(int, 0, head_need);
-	if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
+	if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
 		ieee80211_free_txskb(&local->hw, skb);
 		skb = NULL;
 		return ERR_PTR(-ENOMEM);
@@ -3313,7 +3324,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 	if (unlikely(ieee80211_skb_resize(sdata, skb,
 					  max_t(int, extra_head + hw_headroom -
 						     skb_headroom(skb), 0),
-					  false))) {
+					  ENCRYPT_NO))) {
 		kfree_skb(skb);
 		return true;
 	}

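The enum in the net/mac80211/tx.c change replaces a single bool that conflated "encryption allowed" with "data frame": tailroom for an encrypted management frame was previously inferred from payload bytes that need not be a valid header. Classifying the frame once up front makes every later resize decision a function of that class alone. A small hedged sketch of the three-way split (names invented):

#include <stdbool.h>
#include <stdio.h>

enum encrypt_class { ENC_NO, ENC_MGMT, ENC_DATA };

/* Classify once from stable inputs, then derive all later decisions. */
static enum encrypt_class classify(bool dont_encrypt, bool is_mgmt)
{
	if (dont_encrypt)
		return ENC_NO;
	return is_mgmt ? ENC_MGMT : ENC_DATA;
}

static bool needs_crypto_tailroom(enum encrypt_class c, bool data_tailroom_needed)
{
	/* Management frames always need it; data only when the count says so. */
	return c == ENC_MGMT || (c == ENC_DATA && data_tailroom_needed);
}

int main(void)
{
	enum encrypt_class c = classify(false, true);

	printf("tailroom: %d\n", needs_crypto_tailroom(c, false));	/* 1 */
	return 0;
}
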
@@ -2759,7 +2759,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
 	power_rule = &reg_rule->power_rule;
 
 	if (reg_rule->flags & NL80211_RRF_AUTO_BW)
-		snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
+		snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO",
			 freq_range->max_bandwidth_khz,
			 reg_get_max_bandwidth(rd, reg_rule));
	else

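In the net/wireless/reg.c hunk, reg_get_max_bandwidth() returns an unsigned value, so printing it with %d draws a signedness warning under gcc's -Wformat; %u is the matching conversion. Minimal illustration (hypothetical value):

#include <stdio.h>

int main(void)
{
	unsigned int bw_khz = 160000;

	/* gcc -Wformat: "%d" expects int, but bw_khz is unsigned int. */
	printf("%d KHz AUTO\n", bw_khz);
	/* Correct conversion for an unsigned argument: */
	printf("%u KHz AUTO\n", bw_khz);
	return 0;
}
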
@@ -823,7 +823,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
 	sock->state = SS_CONNECTED;
 	rc = 0;
 out_put_neigh:
-	if (rc) {
+	if (rc && x25->neighbour) {
 		read_lock_bh(&x25_list_lock);
 		x25_neigh_put(x25->neighbour);
 		x25->neighbour = NULL;

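The x25_connect() guard avoids calling x25_neigh_put() on a NULL neighbour when the error path runs before a neighbour was attached; the existing `x25->neighbour = NULL;` after the put is the companion half, preventing a second drop of the same reference. The put-and-clear idiom in miniature (hedged, generic refcount):

#include <stdio.h>
#include <stdlib.h>

struct neigh { int refcnt; };

static void neigh_put(struct neigh *n)
{
	if (--n->refcnt == 0) {
		printf("freeing neigh\n");
		free(n);
	}
}

static void teardown(struct neigh **slot, int rc)
{
	/* Guard: only drop the reference if an error occurred AND we hold one. */
	if (rc && *slot) {
		neigh_put(*slot);
		*slot = NULL;	/* forget it, so a second teardown is a no-op */
	}
}

int main(void)
{
	struct neigh *n = malloc(sizeof(*n));
	struct neigh *slot;

	n->refcnt = 1;
	slot = n;
	teardown(&slot, -1);	/* drops the ref, clears the slot */
	teardown(&slot, -1);	/* safe: slot is already NULL */
	return 0;
}
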
@@ -1613,6 +1613,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
 	int err = -ENOENT;
 	__be32 minspi = htonl(low);
 	__be32 maxspi = htonl(high);
+	__be32 newspi = 0;
 	u32 mark = x->mark.v & x->mark.m;
 
 	spin_lock_bh(&x->lock);
@@ -1631,21 +1632,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
 			xfrm_state_put(x0);
 			goto unlock;
 		}
-		x->id.spi = minspi;
+		newspi = minspi;
 	} else {
 		u32 spi = 0;
 		for (h = 0; h < high-low+1; h++) {
 			spi = low + prandom_u32()%(high-low+1);
 			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
 			if (x0 == NULL) {
-				x->id.spi = htonl(spi);
+				newspi = htonl(spi);
 				break;
 			}
 			xfrm_state_put(x0);
 		}
 	}
-	if (x->id.spi) {
+	if (newspi) {
 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
+		x->id.spi = newspi;
 		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
 		hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);

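The xfrm_alloc_spi() change keeps the candidate SPI in a local until a free value is confirmed, and publishes it to x->id.spi only under xfrm_state_lock, so concurrent lookups never observe a half-installed SPI. The random-probe search itself is straightforward; a hedged standalone sketch with a stub in-use table (note that, like the kernel loop, random probing may retry duplicates and can legitimately come up empty):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define LOW  256u
#define HIGH 260u

/* Stub lookup: pretend these SPIs are already taken. */
static bool spi_in_use(unsigned int spi)
{
	return spi != 259;	/* everything but 259 is busy */
}

/* Probe random candidates in [low, high]; return 0 if none was found free. */
static unsigned int alloc_spi(unsigned int low, unsigned int high)
{
	unsigned int range = high - low + 1;
	unsigned int h;

	for (h = 0; h < range; h++) {
		unsigned int spi = low + (unsigned int)rand() % range;

		if (!spi_in_use(spi))
			return spi;	/* publish only once confirmed free */
	}
	return 0;	/* range attempts may all collide; caller must handle 0 */
}

int main(void)
{
	printf("allocated SPI: %u\n", alloc_spi(LOW, HIGH));
	return 0;
}
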