Merge 4.14.97 into android-4.14-p
Changes in 4.14.97 amd-xgbe: Fix mdio access for non-zero ports and clause 45 PHYs net: bridge: Fix ethernet header pointer before check skb forwardable net: Fix usage of pskb_trim_rcsum net: phy: mdio_bus: add missing device_del() in mdiobus_register() error handling net_sched: refetch skb protocol for each filter openvswitch: Avoid OOB read when parsing flow nlattrs vhost: log dirty page correctly net: ipv4: Fix memory leak in network namespace dismantle tcp: allow MSG_ZEROCOPY transmission also in CLOSE_WAIT state ipfrag: really prevent allocation on netns exit mmc: Kconfig: Enable CONFIG_MMC_SDHCI_IO_ACCESSORS mei: me: add denverton innovation engine device IDs USB: serial: simple: add Motorola Tetra TPG2200 device id USB: serial: pl2303: add new PID to support PL2303TB ASoC: atom: fix a missing check of snd_pcm_lib_malloc_pages ASoC: rt5514-spi: Fix potential NULL pointer dereference ALSA: hda - Add mute LED support for HP ProBook 470 G5 ARCv2: lib: memeset: fix doing prefetchw outside of buffer ARC: adjust memblock_reserve of kernel memory ARC: perf: map generic branches to correct hardware condition s390/early: improve machine detection s390/smp: fix CPU hotplug deadlock with CPU rescan char/mwave: fix potential Spectre v1 vulnerability staging: rtl8188eu: Add device code for D-Link DWA-121 rev B1 tty: Handle problem if line discipline does not have receive_buf uart: Fix crash in uart_write and uart_put_char tty/n_hdlc: fix __might_sleep warning hv_balloon: avoid touching uninitialized struct page during tail onlining Drivers: hv: vmbus: Check for ring when getting debug info CIFS: Fix possible hang during async MTU reads and writes CIFS: Fix credits calculations for reads with errors CIFS: Fix credit calculation for encrypted reads with errors CIFS: Do not reconnect TCP session in add_credits() Input: xpad - add support for SteelSeries Stratus Duo compiler.h: enable builtin overflow checkers and add fallback code Input: uinput - fix undefined behavior in uinput_validate_absinfo() acpi/nfit: Block function zero DSMs acpi/nfit: Fix command-supported detection dm thin: fix passdown_double_checking_shared_status() dm crypt: fix parsing of extended IV arguments KVM: x86: Fix single-step debugging x86/pkeys: Properly copy pkey state at fork() x86/selftests/pkeys: Fork() to check for state being preserved x86/kaslr: Fix incorrect i8254 outb() parameters posix-cpu-timers: Unbreak timer rearming irqchip/gic-v3-its: Align PCI Multi-MSI allocation on their size can: dev: __can_get_echo_skb(): fix bogous check for non-existing skb by removing it can: bcm: check timer values before ktime conversion vt: invoke notifier on screen size change perf unwind: Unwind with libdw doesn't take symfs into account perf unwind: Take pgoff into account when reporting elf to libdwfl Revert "seccomp: add a selftest for get_metadata" net: stmmac: Use correct values in TQS/RQS fields KVM: x86: Fix a 4.14 backport regression related to userspace/guest FPU s390/smp: Fix calling smp_call_ipl_cpu() from ipl CPU nvmet-rdma: Add unlikely for response allocated check nvmet-rdma: fix null dereference under heavy load usb: dwc3: gadget: Clear req->needs_extra_trb flag on cleanup xhci: Fix leaking USB3 shared_hcd at xhci removal ptp_kvm: probe for kvm guest availability x86/pvclock: add setter for pvclock_pvti_cpu0_va x86/xen/time: set pvclock flags on xen_time_init() x86/xen/time: setup vcpu 0 time info page x86/xen/time: Output xen sched_clock time from 0 xen: Fix x86 sched_clock() interface for xen f2fs: read page index 
before freeing btrfs: fix error handling in btrfs_dev_replace_start btrfs: dev-replace: go back to suspended state if target device is missing Linux 4.14.97 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
2
Makefile
2
Makefile
@@ -1,7 +1,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
VERSION = 4
|
||||
PATCHLEVEL = 14
|
||||
SUBLEVEL = 96
|
||||
SUBLEVEL = 97
|
||||
EXTRAVERSION =
|
||||
NAME = Petit Gorille
|
||||
|
||||
|
||||
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
|
||||
|
||||
/* counts condition */
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
|
||||
/* All jump instructions that are taken */
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
|
||||
[PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
|
||||
#ifdef CONFIG_ISA_ARCV2
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
|
||||
|
||||
@@ -7,11 +7,39 @@
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/cache.h>
|
||||
|
||||
#undef PREALLOC_NOT_AVAIL
|
||||
/*
|
||||
* The memset implementation below is optimized to use prefetchw and prealloc
|
||||
* instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
|
||||
* If you want to implement optimized memset for other possible L1 data cache
|
||||
* line lengths (32B and 128B) you should rewrite code carefully checking
|
||||
* we don't call any prefetchw/prealloc instruction for L1 cache lines which
|
||||
* don't belongs to memset area.
|
||||
*/
|
||||
|
||||
#if L1_CACHE_SHIFT == 6
|
||||
|
||||
.macro PREALLOC_INSTR reg, off
|
||||
prealloc [\reg, \off]
|
||||
.endm
|
||||
|
||||
.macro PREFETCHW_INSTR reg, off
|
||||
prefetchw [\reg, \off]
|
||||
.endm
|
||||
|
||||
#else
|
||||
|
||||
.macro PREALLOC_INSTR
|
||||
.endm
|
||||
|
||||
.macro PREFETCHW_INSTR
|
||||
.endm
|
||||
|
||||
#endif
|
||||
|
||||
ENTRY_CFI(memset)
|
||||
prefetchw [r0] ; Prefetch the write location
|
||||
PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
|
||||
mov.f 0, r2
|
||||
;;; if size is zero
|
||||
jz.d [blink]
|
||||
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
|
||||
|
||||
lpnz @.Lset64bytes
|
||||
;; LOOP START
|
||||
#ifdef PREALLOC_NOT_AVAIL
|
||||
prefetchw [r3, 64] ;Prefetch the next write location
|
||||
#else
|
||||
prealloc [r3, 64]
|
||||
#endif
|
||||
PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
|
||||
|
||||
#ifdef CONFIG_ARC_HAS_LL64
|
||||
std.ab r4, [r3, 8]
|
||||
std.ab r4, [r3, 8]
|
||||
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
|
||||
lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
|
||||
lpnz .Lset32bytes
|
||||
;; LOOP START
|
||||
prefetchw [r3, 32] ;Prefetch the next write location
|
||||
#ifdef CONFIG_ARC_HAS_LL64
|
||||
std.ab r4, [r3, 8]
|
||||
std.ab r4, [r3, 8]
|
||||
|
||||
@@ -138,7 +138,8 @@ void __init setup_arch_memory(void)
|
||||
*/
|
||||
|
||||
memblock_add_node(low_mem_start, low_mem_sz, 0);
|
||||
memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
|
||||
memblock_reserve(CONFIG_LINUX_LINK_BASE,
|
||||
__pa(_end) - CONFIG_LINUX_LINK_BASE);
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
if (initrd_start)
|
||||
|
||||
@@ -226,10 +226,10 @@ static noinline __init void detect_machine_type(void)
|
||||
if (stsi(vmms, 3, 2, 2) || !vmms->count)
|
||||
return;
|
||||
|
||||
/* Running under KVM? If not we assume z/VM */
|
||||
/* Detect known hypervisors */
|
||||
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
|
||||
S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
|
||||
else
|
||||
else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
|
||||
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
|
||||
}
|
||||
|
||||
|
||||
@@ -884,6 +884,8 @@ void __init setup_arch(char **cmdline_p)
|
||||
pr_info("Linux is running under KVM in 64-bit mode\n");
|
||||
else if (MACHINE_IS_LPAR)
|
||||
pr_info("Linux is running natively in 64-bit mode\n");
|
||||
else
|
||||
pr_info("Linux is running as a guest in 64-bit mode\n");
|
||||
|
||||
/* Have one command line that is parsed and saved in /proc/cmdline */
|
||||
/* boot_command_line has been already set up in early.c */
|
||||
|
||||
@@ -387,9 +387,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
|
||||
*/
|
||||
void smp_call_ipl_cpu(void (*func)(void *), void *data)
|
||||
{
|
||||
struct lowcore *lc = pcpu_devices->lowcore;
|
||||
|
||||
if (pcpu_devices[0].address == stap())
|
||||
lc = &S390_lowcore;
|
||||
|
||||
pcpu_delegate(&pcpu_devices[0], func, data,
|
||||
pcpu_devices->lowcore->panic_stack -
|
||||
PANIC_FRAME_OFFSET + PAGE_SIZE);
|
||||
lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
|
||||
}
|
||||
|
||||
int smp_find_processor_id(u16 address)
|
||||
@@ -1168,7 +1172,11 @@ static ssize_t __ref rescan_store(struct device *dev,
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = lock_device_hotplug_sysfs();
|
||||
if (rc)
|
||||
return rc;
|
||||
rc = smp_rescan_cpus();
|
||||
unlock_device_hotplug();
|
||||
return rc ? rc : count;
|
||||
}
|
||||
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
|
||||
|
||||
@@ -112,7 +112,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
|
||||
__pa_symbol(&__vvar_page) >> PAGE_SHIFT);
|
||||
} else if (sym_offset == image->sym_pvclock_page) {
|
||||
struct pvclock_vsyscall_time_info *pvti =
|
||||
pvclock_pvti_cpu0_va();
|
||||
pvclock_get_pvti_cpu0_va();
|
||||
if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
|
||||
ret = vm_insert_pfn(
|
||||
vma,
|
||||
|
||||
@@ -182,6 +182,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
|
||||
|
||||
void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
|
||||
|
||||
/*
|
||||
* Init a new mm. Used on mm copies, like at fork()
|
||||
* and on mm's that are brand-new, like at execve().
|
||||
*/
|
||||
static inline int init_new_context(struct task_struct *tsk,
|
||||
struct mm_struct *mm)
|
||||
{
|
||||
@@ -232,8 +236,22 @@ do { \
|
||||
} while (0)
|
||||
#endif
|
||||
|
||||
static inline void arch_dup_pkeys(struct mm_struct *oldmm,
|
||||
struct mm_struct *mm)
|
||||
{
|
||||
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
||||
if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
|
||||
return;
|
||||
|
||||
/* Duplicate the oldmm pkey state in mm: */
|
||||
mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
|
||||
mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
|
||||
{
|
||||
arch_dup_pkeys(oldmm, mm);
|
||||
paravirt_arch_dup_mmap(oldmm, mm);
|
||||
return ldt_dup_context(oldmm, mm);
|
||||
}
|
||||
|
||||
@@ -5,15 +5,6 @@
|
||||
#include <linux/clocksource.h>
|
||||
#include <asm/pvclock-abi.h>
|
||||
|
||||
#ifdef CONFIG_KVM_GUEST
|
||||
extern struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void);
|
||||
#else
|
||||
static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* some helper functions for xen and kvm pv clock sources */
|
||||
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
|
||||
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
|
||||
@@ -102,4 +93,14 @@ struct pvclock_vsyscall_time_info {
|
||||
|
||||
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
|
||||
|
||||
#ifdef CONFIG_PARAVIRT_CLOCK
|
||||
void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti);
|
||||
struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void);
|
||||
#else
|
||||
static inline struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_X86_PVCLOCK_H */
|
||||
|
||||
@@ -47,12 +47,6 @@ early_param("no-kvmclock", parse_no_kvmclock);
|
||||
static struct pvclock_vsyscall_time_info *hv_clock;
|
||||
static struct pvclock_wall_clock wall_clock;
|
||||
|
||||
struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
|
||||
{
|
||||
return hv_clock;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pvclock_pvti_cpu0_va);
|
||||
|
||||
/*
|
||||
* The wallclock is the time of day when we booted. Since then, some time may
|
||||
* have elapsed since the hypervisor wrote the data. So we try to account for
|
||||
@@ -335,6 +329,7 @@ int __init kvm_setup_vsyscall_timeinfo(void)
|
||||
return 1;
|
||||
}
|
||||
|
||||
pvclock_set_pvti_cpu0_va(hv_clock);
|
||||
put_cpu();
|
||||
|
||||
kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
|
||||
|
||||
@@ -25,8 +25,10 @@
|
||||
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/pvclock.h>
|
||||
#include <asm/vgtod.h>
|
||||
|
||||
static u8 valid_flags __read_mostly = 0;
|
||||
static struct pvclock_vsyscall_time_info *pvti_cpu0_va __read_mostly;
|
||||
|
||||
void pvclock_set_flags(u8 flags)
|
||||
{
|
||||
@@ -144,3 +146,15 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
|
||||
|
||||
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
|
||||
}
|
||||
|
||||
void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti)
|
||||
{
|
||||
WARN_ON(vclock_was_used(VCLOCK_PVCLOCK));
|
||||
pvti_cpu0_va = pvti;
|
||||
}
|
||||
|
||||
struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
|
||||
{
|
||||
return pvti_cpu0_va;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pvclock_get_pvti_cpu0_va);
|
||||
|
||||
@@ -5923,8 +5923,7 @@ restart:
|
||||
toggle_interruptibility(vcpu, ctxt->interruptibility);
|
||||
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
|
||||
kvm_rip_write(vcpu, ctxt->eip);
|
||||
if (r == EMULATE_DONE &&
|
||||
(ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
|
||||
if (r == EMULATE_DONE && ctxt->tf)
|
||||
kvm_vcpu_do_singlestep(vcpu, &r);
|
||||
if (!ctxt->have_exception ||
|
||||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
|
||||
@@ -7423,14 +7422,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
||||
}
|
||||
}
|
||||
|
||||
kvm_load_guest_fpu(vcpu);
|
||||
|
||||
if (unlikely(vcpu->arch.complete_userspace_io)) {
|
||||
int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
|
||||
vcpu->arch.complete_userspace_io = NULL;
|
||||
r = cui(vcpu);
|
||||
if (r <= 0)
|
||||
goto out_fpu;
|
||||
goto out;
|
||||
} else
|
||||
WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
|
||||
|
||||
@@ -7439,8 +7436,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
||||
else
|
||||
r = vcpu_run(vcpu);
|
||||
|
||||
out_fpu:
|
||||
kvm_put_guest_fpu(vcpu);
|
||||
out:
|
||||
kvm_put_guest_fpu(vcpu);
|
||||
post_kvm_run_save(vcpu);
|
||||
|
||||
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
|
||||
u16 status, timer;
|
||||
|
||||
do {
|
||||
outb(I8254_PORT_CONTROL,
|
||||
I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
|
||||
outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
|
||||
I8254_PORT_CONTROL);
|
||||
status = inb(I8254_PORT_COUNTER0);
|
||||
timer = inb(I8254_PORT_COUNTER0);
|
||||
timer |= inb(I8254_PORT_COUNTER0) << 8;
|
||||
|
||||
@@ -22,6 +22,8 @@ static DEFINE_PER_CPU(u64, spec_ctrl);
|
||||
|
||||
void xen_arch_pre_suspend(void)
|
||||
{
|
||||
xen_save_time_memory_area();
|
||||
|
||||
if (xen_pv_domain())
|
||||
xen_pv_pre_suspend();
|
||||
}
|
||||
@@ -32,6 +34,8 @@ void xen_arch_post_suspend(int cancelled)
|
||||
xen_pv_post_suspend(cancelled);
|
||||
else
|
||||
xen_hvm_post_suspend(cancelled);
|
||||
|
||||
xen_restore_time_memory_area();
|
||||
}
|
||||
|
||||
static void xen_vcpu_notify_restore(void *data)
|
||||
|
||||
@@ -31,6 +31,8 @@
|
||||
/* Xen may fire a timer up to this many ns early */
|
||||
#define TIMER_SLOP 100000
|
||||
|
||||
static u64 xen_sched_clock_offset __read_mostly;
|
||||
|
||||
/* Get the TSC speed from Xen */
|
||||
static unsigned long xen_tsc_khz(void)
|
||||
{
|
||||
@@ -57,6 +59,11 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
|
||||
return xen_clocksource_read();
|
||||
}
|
||||
|
||||
static u64 xen_sched_clock(void)
|
||||
{
|
||||
return xen_clocksource_read() - xen_sched_clock_offset;
|
||||
}
|
||||
|
||||
static void xen_read_wallclock(struct timespec *ts)
|
||||
{
|
||||
struct shared_info *s = HYPERVISOR_shared_info;
|
||||
@@ -354,8 +361,6 @@ void xen_timer_resume(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
pvclock_resume();
|
||||
|
||||
if (xen_clockevent != &xen_vcpuop_clockevent)
|
||||
return;
|
||||
|
||||
@@ -367,12 +372,107 @@ void xen_timer_resume(void)
|
||||
}
|
||||
|
||||
static const struct pv_time_ops xen_time_ops __initconst = {
|
||||
.sched_clock = xen_clocksource_read,
|
||||
.sched_clock = xen_sched_clock,
|
||||
.steal_clock = xen_steal_clock,
|
||||
};
|
||||
|
||||
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
|
||||
static u64 xen_clock_value_saved;
|
||||
|
||||
void xen_save_time_memory_area(void)
|
||||
{
|
||||
struct vcpu_register_time_memory_area t;
|
||||
int ret;
|
||||
|
||||
xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
|
||||
|
||||
if (!xen_clock)
|
||||
return;
|
||||
|
||||
t.addr.v = NULL;
|
||||
|
||||
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
|
||||
if (ret != 0)
|
||||
pr_notice("Cannot save secondary vcpu_time_info (err %d)",
|
||||
ret);
|
||||
else
|
||||
clear_page(xen_clock);
|
||||
}
|
||||
|
||||
void xen_restore_time_memory_area(void)
|
||||
{
|
||||
struct vcpu_register_time_memory_area t;
|
||||
int ret;
|
||||
|
||||
if (!xen_clock)
|
||||
goto out;
|
||||
|
||||
t.addr.v = &xen_clock->pvti;
|
||||
|
||||
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
|
||||
|
||||
/*
|
||||
* We don't disable VCLOCK_PVCLOCK entirely if it fails to register the
|
||||
* secondary time info with Xen or if we migrated to a host without the
|
||||
* necessary flags. On both of these cases what happens is either
|
||||
* process seeing a zeroed out pvti or seeing no PVCLOCK_TSC_STABLE_BIT
|
||||
* bit set. Userspace checks the latter and if 0, it discards the data
|
||||
* in pvti and fallbacks to a system call for a reliable timestamp.
|
||||
*/
|
||||
if (ret != 0)
|
||||
pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
|
||||
ret);
|
||||
|
||||
out:
|
||||
/* Need pvclock_resume() before using xen_clocksource_read(). */
|
||||
pvclock_resume();
|
||||
xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
|
||||
}
|
||||
|
||||
static void xen_setup_vsyscall_time_info(void)
|
||||
{
|
||||
struct vcpu_register_time_memory_area t;
|
||||
struct pvclock_vsyscall_time_info *ti;
|
||||
int ret;
|
||||
|
||||
ti = (struct pvclock_vsyscall_time_info *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!ti)
|
||||
return;
|
||||
|
||||
t.addr.v = &ti->pvti;
|
||||
|
||||
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t);
|
||||
if (ret) {
|
||||
pr_notice("xen: VCLOCK_PVCLOCK not supported (err %d)\n", ret);
|
||||
free_page((unsigned long)ti);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* If primary time info had this bit set, secondary should too since
|
||||
* it's the same data on both just different memory regions. But we
|
||||
* still check it in case hypervisor is buggy.
|
||||
*/
|
||||
if (!(ti->pvti.flags & PVCLOCK_TSC_STABLE_BIT)) {
|
||||
t.addr.v = NULL;
|
||||
ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area,
|
||||
0, &t);
|
||||
if (!ret)
|
||||
free_page((unsigned long)ti);
|
||||
|
||||
pr_notice("xen: VCLOCK_PVCLOCK not supported (tsc unstable)\n");
|
||||
return;
|
||||
}
|
||||
|
||||
xen_clock = ti;
|
||||
pvclock_set_pvti_cpu0_va(xen_clock);
|
||||
|
||||
xen_clocksource.archdata.vclock_mode = VCLOCK_PVCLOCK;
|
||||
}
|
||||
|
||||
static void __init xen_time_init(void)
|
||||
{
|
||||
struct pvclock_vcpu_time_info *pvti;
|
||||
int cpu = smp_processor_id();
|
||||
struct timespec tp;
|
||||
|
||||
@@ -396,6 +496,16 @@ static void __init xen_time_init(void)
|
||||
|
||||
setup_force_cpu_cap(X86_FEATURE_TSC);
|
||||
|
||||
/*
|
||||
* We check ahead on the primary time info if this
|
||||
* bit is supported hence speeding up Xen clocksource.
|
||||
*/
|
||||
pvti = &__this_cpu_read(xen_vcpu)->time;
|
||||
if (pvti->flags & PVCLOCK_TSC_STABLE_BIT) {
|
||||
pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
|
||||
xen_setup_vsyscall_time_info();
|
||||
}
|
||||
|
||||
xen_setup_runstate_info(cpu);
|
||||
xen_setup_timer(cpu);
|
||||
xen_setup_cpu_clockevents();
|
||||
@@ -408,6 +518,7 @@ static void __init xen_time_init(void)
|
||||
|
||||
void __ref xen_init_time_ops(void)
|
||||
{
|
||||
xen_sched_clock_offset = xen_clocksource_read();
|
||||
pv_time_ops = xen_time_ops;
|
||||
|
||||
x86_init.timers.timer_init = xen_time_init;
|
||||
@@ -450,6 +561,7 @@ void __init xen_hvm_init_time_ops(void)
|
||||
return;
|
||||
}
|
||||
|
||||
xen_sched_clock_offset = xen_clocksource_read();
|
||||
pv_time_ops = xen_time_ops;
|
||||
x86_init.timers.setup_percpu_clockev = xen_time_init;
|
||||
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
|
||||
|
||||
@@ -70,6 +70,8 @@ void xen_setup_runstate_info(int cpu);
|
||||
void xen_teardown_timer(int cpu);
|
||||
u64 xen_clocksource_read(void);
|
||||
void xen_setup_cpu_clockevents(void);
|
||||
void xen_save_time_memory_area(void);
|
||||
void xen_restore_time_memory_area(void);
|
||||
void __init xen_init_time_ops(void);
|
||||
void __init xen_hvm_init_time_ops(void);
|
||||
|
||||
|
||||
@@ -208,6 +208,32 @@ static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
|
||||
return xlat_nvdimm_status(buf, cmd, status);
|
||||
}
|
||||
|
||||
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
|
||||
struct nd_cmd_pkg *call_pkg)
|
||||
{
|
||||
if (call_pkg) {
|
||||
int i;
|
||||
|
||||
if (nfit_mem->family != call_pkg->nd_family)
|
||||
return -ENOTTY;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
|
||||
if (call_pkg->nd_reserved2[i])
|
||||
return -EINVAL;
|
||||
return call_pkg->nd_command;
|
||||
}
|
||||
|
||||
/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
|
||||
if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
|
||||
return cmd;
|
||||
|
||||
/*
|
||||
* Force function number validation to fail since 0 is never
|
||||
* published as a valid function in dsm_mask.
|
||||
*/
|
||||
return 0;
|
||||
}
|
||||
|
||||
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
||||
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
|
||||
{
|
||||
@@ -220,21 +246,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
||||
unsigned long cmd_mask, dsm_mask;
|
||||
u32 offset, fw_status = 0;
|
||||
acpi_handle handle;
|
||||
unsigned int func;
|
||||
const guid_t *guid;
|
||||
int rc, i;
|
||||
int func, rc, i;
|
||||
|
||||
if (cmd_rc)
|
||||
*cmd_rc = -EINVAL;
|
||||
func = cmd;
|
||||
if (cmd == ND_CMD_CALL) {
|
||||
call_pkg = buf;
|
||||
func = call_pkg->nd_command;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
|
||||
if (call_pkg->nd_reserved2[i])
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (nvdimm) {
|
||||
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
|
||||
@@ -242,9 +258,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
||||
|
||||
if (!adev)
|
||||
return -ENOTTY;
|
||||
if (call_pkg && nfit_mem->family != call_pkg->nd_family)
|
||||
return -ENOTTY;
|
||||
|
||||
if (cmd == ND_CMD_CALL)
|
||||
call_pkg = buf;
|
||||
func = cmd_to_func(nfit_mem, cmd, call_pkg);
|
||||
if (func < 0)
|
||||
return func;
|
||||
dimm_name = nvdimm_name(nvdimm);
|
||||
cmd_name = nvdimm_cmd_name(cmd);
|
||||
cmd_mask = nvdimm_cmd_mask(nvdimm);
|
||||
@@ -255,6 +274,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
||||
} else {
|
||||
struct acpi_device *adev = to_acpi_dev(acpi_desc);
|
||||
|
||||
func = cmd;
|
||||
cmd_name = nvdimm_bus_cmd_name(cmd);
|
||||
cmd_mask = nd_desc->cmd_mask;
|
||||
dsm_mask = cmd_mask;
|
||||
@@ -269,7 +289,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
|
||||
if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
|
||||
return -ENOTTY;
|
||||
|
||||
if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
|
||||
/*
|
||||
* Check for a valid command. For ND_CMD_CALL, we also have to
|
||||
* make sure that the DSM function is supported.
|
||||
*/
|
||||
if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
|
||||
return -ENOTTY;
|
||||
else if (!test_bit(cmd, &cmd_mask))
|
||||
return -ENOTTY;
|
||||
|
||||
in_obj.type = ACPI_TYPE_PACKAGE;
|
||||
@@ -1503,6 +1529,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Function 0 is the command interrogation function, don't
|
||||
* export it to potential userspace use, and enable it to be
|
||||
* used as an error value in acpi_nfit_ctl().
|
||||
*/
|
||||
dsm_mask &= ~1UL;
|
||||
|
||||
guid = to_nfit_uuid(nfit_mem->family);
|
||||
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
|
||||
if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
|
||||
|
||||
@@ -59,6 +59,7 @@
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/serial_8250.h>
|
||||
#include <linux/nospec.h>
|
||||
#include "smapi.h"
|
||||
#include "mwavedd.h"
|
||||
#include "3780i.h"
|
||||
@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
|
||||
ipcnum);
|
||||
return -EINVAL;
|
||||
}
|
||||
ipcnum = array_index_nospec(ipcnum,
|
||||
ARRAY_SIZE(pDrvData->IPCs));
|
||||
PRINTK_3(TRACE_MWAVE,
|
||||
"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
|
||||
" ipcnum %x entry usIntCount %x\n",
|
||||
@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
|
||||
" Invalid ipcnum %x\n", ipcnum);
|
||||
return -EINVAL;
|
||||
}
|
||||
ipcnum = array_index_nospec(ipcnum,
|
||||
ARRAY_SIZE(pDrvData->IPCs));
|
||||
PRINTK_3(TRACE_MWAVE,
|
||||
"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
|
||||
" ipcnum %x, usIntCount %x\n",
|
||||
@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
|
||||
ipcnum);
|
||||
return -EINVAL;
|
||||
}
|
||||
ipcnum = array_index_nospec(ipcnum,
|
||||
ARRAY_SIZE(pDrvData->IPCs));
|
||||
mutex_lock(&mwave_mutex);
|
||||
if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
|
||||
pDrvData->IPCs[ipcnum].bIsEnabled = false;
|
||||
|
||||
@@ -846,12 +846,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
|
||||
pfn_cnt -= pgs_ol;
|
||||
/*
|
||||
* Check if the corresponding memory block is already
|
||||
* online by checking its last previously backed page.
|
||||
* In case it is we need to bring rest (which was not
|
||||
* backed previously) online too.
|
||||
* online. It is possible to observe struct pages still
|
||||
* being uninitialized here so check section instead.
|
||||
* In case the section is online we need to bring the
|
||||
* rest of pfns (which were not backed previously)
|
||||
* online too.
|
||||
*/
|
||||
if (start_pfn > has->start_pfn &&
|
||||
!PageReserved(pfn_to_page(start_pfn - 1)))
|
||||
online_section_nr(pfn_to_section_nr(start_pfn)))
|
||||
hv_bring_pgs_online(has, start_pfn, pgs_ol);
|
||||
|
||||
}
|
||||
|
||||
@@ -141,26 +141,25 @@ static u32 hv_copyto_ringbuffer(
|
||||
}
|
||||
|
||||
/* Get various debug metrics for the specified ring buffer. */
|
||||
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
||||
struct hv_ring_buffer_debug_info *debug_info)
|
||||
int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
||||
struct hv_ring_buffer_debug_info *debug_info)
|
||||
{
|
||||
u32 bytes_avail_towrite;
|
||||
u32 bytes_avail_toread;
|
||||
|
||||
if (ring_info->ring_buffer) {
|
||||
hv_get_ringbuffer_availbytes(ring_info,
|
||||
&bytes_avail_toread,
|
||||
&bytes_avail_towrite);
|
||||
if (!ring_info->ring_buffer)
|
||||
return -EINVAL;
|
||||
|
||||
debug_info->bytes_avail_toread = bytes_avail_toread;
|
||||
debug_info->bytes_avail_towrite = bytes_avail_towrite;
|
||||
debug_info->current_read_index =
|
||||
ring_info->ring_buffer->read_index;
|
||||
debug_info->current_write_index =
|
||||
ring_info->ring_buffer->write_index;
|
||||
debug_info->current_interrupt_mask =
|
||||
ring_info->ring_buffer->interrupt_mask;
|
||||
}
|
||||
hv_get_ringbuffer_availbytes(ring_info,
|
||||
&bytes_avail_toread,
|
||||
&bytes_avail_towrite);
|
||||
debug_info->bytes_avail_toread = bytes_avail_toread;
|
||||
debug_info->bytes_avail_towrite = bytes_avail_towrite;
|
||||
debug_info->current_read_index = ring_info->ring_buffer->read_index;
|
||||
debug_info->current_write_index = ring_info->ring_buffer->write_index;
|
||||
debug_info->current_interrupt_mask
|
||||
= ring_info->ring_buffer->interrupt_mask;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
|
||||
|
||||
|
||||
@@ -297,12 +297,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info outbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
||||
&outbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
|
||||
}
|
||||
static DEVICE_ATTR_RO(out_intr_mask);
|
||||
@@ -312,12 +316,15 @@ static ssize_t out_read_index_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info outbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
||||
&outbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return sprintf(buf, "%d\n", outbound.current_read_index);
|
||||
}
|
||||
static DEVICE_ATTR_RO(out_read_index);
|
||||
@@ -328,12 +335,15 @@ static ssize_t out_write_index_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info outbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
||||
&outbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return sprintf(buf, "%d\n", outbound.current_write_index);
|
||||
}
|
||||
static DEVICE_ATTR_RO(out_write_index);
|
||||
@@ -344,12 +354,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info outbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
||||
&outbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
|
||||
}
|
||||
static DEVICE_ATTR_RO(out_read_bytes_avail);
|
||||
@@ -360,12 +373,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info outbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
|
||||
&outbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
|
||||
}
|
||||
static DEVICE_ATTR_RO(out_write_bytes_avail);
|
||||
@@ -375,12 +391,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info inbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
|
||||
}
|
||||
static DEVICE_ATTR_RO(in_intr_mask);
|
||||
@@ -390,12 +409,15 @@ static ssize_t in_read_index_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info inbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return sprintf(buf, "%d\n", inbound.current_read_index);
|
||||
}
|
||||
static DEVICE_ATTR_RO(in_read_index);
|
||||
@@ -405,12 +427,15 @@ static ssize_t in_write_index_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info inbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return sprintf(buf, "%d\n", inbound.current_write_index);
|
||||
}
|
||||
static DEVICE_ATTR_RO(in_write_index);
|
||||
@@ -421,12 +446,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info inbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
|
||||
}
|
||||
static DEVICE_ATTR_RO(in_read_bytes_avail);
|
||||
@@ -437,12 +465,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
|
||||
{
|
||||
struct hv_device *hv_dev = device_to_hv_device(dev);
|
||||
struct hv_ring_buffer_debug_info inbound;
|
||||
int ret;
|
||||
|
||||
if (!hv_dev->channel)
|
||||
return -ENODEV;
|
||||
if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
|
||||
return -EINVAL;
|
||||
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
|
||||
ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
|
||||
}
|
||||
static DEVICE_ATTR_RO(in_write_bytes_avail);
|
||||
|
||||
@@ -255,6 +255,8 @@ static const struct xpad_device {
|
||||
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
|
||||
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
|
||||
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
|
||||
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
|
||||
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
|
||||
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
|
||||
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
|
||||
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
|
||||
@@ -431,6 +433,7 @@ static const struct usb_device_id xpad_table[] = {
|
||||
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
|
||||
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
|
||||
XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
|
||||
XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
|
||||
XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
|
||||
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
|
||||
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
|
||||
|
||||
@@ -39,6 +39,7 @@
|
||||
#include <linux/fs.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <linux/uinput.h>
|
||||
#include <linux/overflow.h>
|
||||
#include <linux/input/mt.h>
|
||||
#include "../input-compat.h"
|
||||
|
||||
@@ -356,7 +357,7 @@ static int uinput_open(struct inode *inode, struct file *file)
|
||||
static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
|
||||
const struct input_absinfo *abs)
|
||||
{
|
||||
int min, max;
|
||||
int min, max, range;
|
||||
|
||||
min = abs->minimum;
|
||||
max = abs->maximum;
|
||||
@@ -368,7 +369,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (abs->flat > max - min) {
|
||||
if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
|
||||
printk(KERN_DEBUG
|
||||
"%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
|
||||
UINPUT_NAME, code, abs->flat, min, max);
|
||||
|
||||
@@ -2086,13 +2086,14 @@ static void its_free_device(struct its_device *its_dev)
|
||||
kfree(its_dev);
|
||||
}
|
||||
|
||||
static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
|
||||
static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
|
||||
{
|
||||
int idx;
|
||||
|
||||
idx = find_first_zero_bit(dev->event_map.lpi_map,
|
||||
dev->event_map.nr_lpis);
|
||||
if (idx == dev->event_map.nr_lpis)
|
||||
idx = bitmap_find_free_region(dev->event_map.lpi_map,
|
||||
dev->event_map.nr_lpis,
|
||||
get_count_order(nvecs));
|
||||
if (idx < 0)
|
||||
return -ENOSPC;
|
||||
|
||||
*hwirq = dev->event_map.lpi_base + idx;
|
||||
@@ -2188,21 +2189,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
int err;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_irqs; i++) {
|
||||
err = its_alloc_device_irq(its_dev, &hwirq);
|
||||
if (err)
|
||||
return err;
|
||||
err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
|
||||
for (i = 0; i < nr_irqs; i++) {
|
||||
err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
irq_domain_set_hwirq_and_chip(domain, virq + i,
|
||||
hwirq, &its_irq_chip, its_dev);
|
||||
hwirq + i, &its_irq_chip, its_dev);
|
||||
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
|
||||
pr_debug("ID:%d pID:%d vID:%d\n",
|
||||
(int)(hwirq - its_dev->event_map.lpi_base),
|
||||
(int) hwirq, virq + i);
|
||||
(int)(hwirq + i - its_dev->event_map.lpi_base),
|
||||
(int)(hwirq + i), virq + i);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -2413,9 +2413,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
|
||||
* capi:cipher_api_spec-iv:ivopts
|
||||
*/
|
||||
tmp = &cipher_in[strlen("capi:")];
|
||||
cipher_api = strsep(&tmp, "-");
|
||||
*ivmode = strsep(&tmp, ":");
|
||||
*ivopts = tmp;
|
||||
|
||||
/* Separate IV options if present, it can contain another '-' in hash name */
|
||||
*ivopts = strrchr(tmp, ':');
|
||||
if (*ivopts) {
|
||||
**ivopts = '\0';
|
||||
(*ivopts)++;
|
||||
}
|
||||
/* Parse IV mode */
|
||||
*ivmode = strrchr(tmp, '-');
|
||||
if (*ivmode) {
|
||||
**ivmode = '\0';
|
||||
(*ivmode)++;
|
||||
}
|
||||
/* The rest is crypto API spec */
|
||||
cipher_api = tmp;
|
||||
|
||||
if (*ivmode && !strcmp(*ivmode, "lmk"))
|
||||
cc->tfms_count = 64;
|
||||
@@ -2485,11 +2497,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
|
||||
goto bad_mem;
|
||||
|
||||
chainmode = strsep(&tmp, "-");
|
||||
*ivopts = strsep(&tmp, "-");
|
||||
*ivmode = strsep(&*ivopts, ":");
|
||||
|
||||
if (tmp)
|
||||
DMWARN("Ignoring unexpected additional cipher options");
|
||||
*ivmode = strsep(&tmp, ":");
|
||||
*ivopts = tmp;
|
||||
|
||||
/*
|
||||
* For compatibility with the original dm-crypt mapping format, if
|
||||
|
||||
@@ -1687,7 +1687,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
|
||||
return r;
|
||||
}
|
||||
|
||||
int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
|
||||
int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
|
||||
{
|
||||
int r;
|
||||
uint32_t ref_count;
|
||||
@@ -1695,7 +1695,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
|
||||
down_read(&pmd->root_lock);
|
||||
r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
|
||||
if (!r)
|
||||
*result = (ref_count != 0);
|
||||
*result = (ref_count > 1);
|
||||
up_read(&pmd->root_lock);
|
||||
|
||||
return r;
|
||||
|
||||
@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
|
||||
|
||||
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
|
||||
|
||||
int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
|
||||
int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
|
||||
|
||||
int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
|
||||
int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
|
||||
|
||||
@@ -1042,7 +1042,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
|
||||
* passdown we have to check that these blocks are now unused.
|
||||
*/
|
||||
int r = 0;
|
||||
bool used = true;
|
||||
bool shared = true;
|
||||
struct thin_c *tc = m->tc;
|
||||
struct pool *pool = tc->pool;
|
||||
dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
|
||||
@@ -1052,11 +1052,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
|
||||
while (b != end) {
|
||||
/* find start of unmapped run */
|
||||
for (; b < end; b++) {
|
||||
r = dm_pool_block_is_used(pool->pmd, b, &used);
|
||||
r = dm_pool_block_is_shared(pool->pmd, b, &shared);
|
||||
if (r)
|
||||
goto out;
|
||||
|
||||
if (!used)
|
||||
if (!shared)
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -1065,11 +1065,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
|
||||
|
||||
/* find end of run */
|
||||
for (e = b + 1; e != end; e++) {
|
||||
r = dm_pool_block_is_used(pool->pmd, e, &used);
|
||||
r = dm_pool_block_is_shared(pool->pmd, e, &shared);
|
||||
if (r)
|
||||
goto out;
|
||||
|
||||
if (used)
|
||||
if (shared)
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
@@ -127,6 +127,8 @@
|
||||
#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
|
||||
#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
|
||||
|
||||
#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
|
||||
|
||||
#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
|
||||
|
||||
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
|
||||
|
||||
@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
|
||||
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
|
||||
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
|
||||
|
||||
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
|
||||
|
||||
@@ -429,6 +429,7 @@ config MMC_SDHCI_MSM
|
||||
tristate "Qualcomm SDHCI Controller Support"
|
||||
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
|
||||
depends on MMC_SDHCI_PLTFM
|
||||
select MMC_SDHCI_IO_ACCESSORS
|
||||
help
|
||||
This selects the Secure Digital Host Controller Interface (SDHCI)
|
||||
support present in Qualcomm SOCs. The controller supports
|
||||
|
||||
@@ -479,8 +479,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
|
||||
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
|
||||
{
|
||||
struct can_priv *priv = netdev_priv(dev);
|
||||
struct sk_buff *skb = priv->echo_skb[idx];
|
||||
struct canfd_frame *cf;
|
||||
|
||||
if (idx >= priv->echo_skb_max) {
|
||||
netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
|
||||
@@ -488,20 +486,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!skb) {
|
||||
netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
|
||||
__func__, idx);
|
||||
return NULL;
|
||||
if (priv->echo_skb[idx]) {
|
||||
/* Using "struct canfd_frame::len" for the frame
|
||||
* length is supported on both CAN and CANFD frames.
|
||||
*/
|
||||
struct sk_buff *skb = priv->echo_skb[idx];
|
||||
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
|
||||
u8 len = cf->len;
|
||||
|
||||
*len_ptr = len;
|
||||
priv->echo_skb[idx] = NULL;
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
/* Using "struct canfd_frame::len" for the frame
|
||||
* length is supported on both CAN and CANFD frames.
|
||||
*/
|
||||
cf = (struct canfd_frame *)skb->data;
|
||||
*len_ptr = cf->len;
|
||||
priv->echo_skb[idx] = NULL;
|
||||
|
||||
return skb;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@@ -431,8 +431,6 @@
|
||||
#define MAC_MDIOSCAR_PA_WIDTH 5
|
||||
#define MAC_MDIOSCAR_RA_INDEX 0
|
||||
#define MAC_MDIOSCAR_RA_WIDTH 16
|
||||
#define MAC_MDIOSCAR_REG_INDEX 0
|
||||
#define MAC_MDIOSCAR_REG_WIDTH 21
|
||||
#define MAC_MDIOSCCDR_BUSY_INDEX 22
|
||||
#define MAC_MDIOSCCDR_BUSY_WIDTH 1
|
||||
#define MAC_MDIOSCCDR_CMD_INDEX 16
|
||||
|
||||
@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned int xgbe_create_mdio_sca(int port, int reg)
|
||||
{
|
||||
unsigned int mdio_sca, da;
|
||||
|
||||
da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
|
||||
|
||||
mdio_sca = 0;
|
||||
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
|
||||
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
|
||||
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
|
||||
|
||||
return mdio_sca;
|
||||
}
|
||||
|
||||
static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
|
||||
int reg, u16 val)
|
||||
{
|
||||
@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
|
||||
|
||||
reinit_completion(&pdata->mdio_complete);
|
||||
|
||||
mdio_sca = 0;
|
||||
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
|
||||
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
|
||||
mdio_sca = xgbe_create_mdio_sca(addr, reg);
|
||||
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
|
||||
|
||||
mdio_sccd = 0;
|
||||
@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
|
||||
|
||||
reinit_completion(&pdata->mdio_complete);
|
||||
|
||||
mdio_sca = 0;
|
||||
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
|
||||
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
|
||||
mdio_sca = xgbe_create_mdio_sca(addr, reg);
|
||||
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
|
||||
|
||||
mdio_sccd = 0;
|
||||
|
||||
@@ -444,7 +444,8 @@ struct stmmac_dma_ops {
|
||||
int rxfifosz);
|
||||
void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
|
||||
int fifosz);
|
||||
void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
|
||||
void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
|
||||
int fifosz);
|
||||
/* To track extra statistic (if supported) */
|
||||
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
|
||||
void __iomem *ioaddr);
|
||||
|
||||
@@ -271,9 +271,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
|
||||
}
|
||||
|
||||
static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
|
||||
u32 channel)
|
||||
u32 channel, int fifosz)
|
||||
{
|
||||
u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
|
||||
unsigned int tqs = fifosz / 256 - 1;
|
||||
|
||||
if (mode == SF_DMA_MODE) {
|
||||
pr_debug("GMAC: enable TX store and forward mode\n");
|
||||
@@ -306,12 +307,14 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
|
||||
* For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
|
||||
* with reset values: TXQEN off, TQS 256 bytes.
|
||||
*
|
||||
* Write the bits in both cases, since it will have no effect when RO.
|
||||
* For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
|
||||
* be RO, however, writing the whole TQS field will result in a value
|
||||
* equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
|
||||
* TXQEN must be written for multi-channel operation and TQS must
|
||||
* reflect the available fifo size per queue (total fifo size / number
|
||||
* of enabled queues).
|
||||
*/
|
||||
mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
|
||||
mtl_tx_op |= MTL_OP_MODE_TXQEN;
|
||||
mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
|
||||
mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
|
||||
|
||||
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
|
||||
}
|
||||
|
||||
|
||||
@@ -1765,12 +1765,19 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
|
||||
u32 rx_channels_count = priv->plat->rx_queues_to_use;
|
||||
u32 tx_channels_count = priv->plat->tx_queues_to_use;
|
||||
int rxfifosz = priv->plat->rx_fifo_size;
|
||||
int txfifosz = priv->plat->tx_fifo_size;
|
||||
u32 txmode = 0;
|
||||
u32 rxmode = 0;
|
||||
u32 chan = 0;
|
||||
|
||||
if (rxfifosz == 0)
|
||||
rxfifosz = priv->dma_cap.rx_fifo_size;
|
||||
if (txfifosz == 0)
|
||||
txfifosz = priv->dma_cap.tx_fifo_size;
|
||||
|
||||
/* Adjust for real per queue fifo size */
|
||||
rxfifosz /= rx_channels_count;
|
||||
txfifosz /= tx_channels_count;
|
||||
|
||||
if (priv->plat->force_thresh_dma_mode) {
|
||||
txmode = tc;
|
||||
@@ -1798,7 +1805,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
|
||||
rxfifosz);
|
||||
|
||||
for (chan = 0; chan < tx_channels_count; chan++)
|
||||
priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
|
||||
priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
|
||||
txfifosz);
|
||||
} else {
|
||||
priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
|
||||
rxfifosz);
|
||||
@@ -1967,15 +1975,25 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
|
||||
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
|
||||
u32 rxmode, u32 chan)
|
||||
{
|
||||
u32 rx_channels_count = priv->plat->rx_queues_to_use;
|
||||
u32 tx_channels_count = priv->plat->tx_queues_to_use;
|
||||
int rxfifosz = priv->plat->rx_fifo_size;
|
||||
int txfifosz = priv->plat->tx_fifo_size;
|
||||
|
||||
if (rxfifosz == 0)
|
||||
rxfifosz = priv->dma_cap.rx_fifo_size;
|
||||
if (txfifosz == 0)
|
||||
txfifosz = priv->dma_cap.tx_fifo_size;
|
||||
|
||||
/* Adjust for real per queue fifo size */
|
||||
rxfifosz /= rx_channels_count;
|
||||
txfifosz /= tx_channels_count;
|
||||
|
||||
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
|
||||
priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
|
||||
rxfifosz);
|
||||
priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
|
||||
priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
|
||||
txfifosz);
|
||||
} else {
|
||||
priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
|
||||
rxfifosz);
|
||||
|
||||
@@ -358,6 +358,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
|
||||
if (IS_ERR(gpiod)) {
|
||||
dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
|
||||
bus->id);
|
||||
device_del(&bus->dev);
|
||||
return PTR_ERR(gpiod);
|
||||
} else if (gpiod) {
|
||||
bus->reset_gpiod = gpiod;
|
||||
|
||||
@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
|
||||
if (pskb_trim_rcsum(skb, len))
|
||||
goto drop;
|
||||
|
||||
ph = pppoe_hdr(skb);
|
||||
pn = pppoe_pernet(dev_net(dev));
|
||||
|
||||
/* Note that get_item does a sock_hold(), so sk_pppox(po)
|
||||
|
||||
@@ -137,6 +137,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
|
||||
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
|
||||
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
|
||||
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
|
||||
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
|
||||
struct nvmet_rdma_rsp *r);
|
||||
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
|
||||
struct nvmet_rdma_rsp *r);
|
||||
|
||||
static struct nvmet_fabrics_ops nvmet_rdma_ops;
|
||||
|
||||
@@ -175,9 +179,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
|
||||
spin_unlock_irqrestore(&queue->rsps_lock, flags);
|
||||
|
||||
if (unlikely(!rsp)) {
|
||||
rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
|
||||
int ret;
|
||||
|
||||
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
|
||||
if (unlikely(!rsp))
|
||||
return NULL;
|
||||
ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
|
||||
if (unlikely(ret)) {
|
||||
kfree(rsp);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
rsp->allocated = true;
|
||||
}
|
||||
|
||||
@@ -189,7 +201,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (rsp->allocated) {
|
||||
if (unlikely(rsp->allocated)) {
|
||||
nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
|
||||
kfree(rsp);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -178,8 +178,11 @@ static int __init ptp_kvm_init(void)
|
||||
{
|
||||
long ret;
|
||||
|
||||
if (!kvm_para_available())
|
||||
return -ENODEV;
|
||||
|
||||
clock_pair_gpa = slow_virt_to_phys(&clock_pair);
|
||||
hv_clock = pvclock_pvti_cpu0_va();
|
||||
hv_clock = pvclock_get_pvti_cpu0_va();
|
||||
|
||||
if (!hv_clock)
|
||||
return -ENODEV;
|
||||
|
||||
@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
|
||||
|
||||
static void __ref sclp_cpu_change_notify(struct work_struct *work)
|
||||
{
|
||||
lock_device_hotplug();
|
||||
smp_rescan_cpus();
|
||||
unlock_device_hotplug();
|
||||
}
|
||||
|
||||
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
|
||||
|
||||
@@ -43,6 +43,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
|
||||
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
|
||||
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
|
||||
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
|
||||
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
|
||||
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
|
||||
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
|
||||
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
|
||||
|
||||
@@ -598,6 +598,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
|
||||
/* too large for caller's buffer */
|
||||
ret = -EOVERFLOW;
|
||||
} else {
|
||||
__set_current_state(TASK_RUNNING);
|
||||
if (copy_to_user(buf, rbuf->buf, rbuf->count))
|
||||
ret = -EFAULT;
|
||||
else
|
||||
|
||||
@@ -563,10 +563,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
|
||||
int ret = 0;
|
||||
|
||||
circ = &state->xmit;
|
||||
if (!circ->buf)
|
||||
return 0;
|
||||
|
||||
port = uart_port_lock(state, flags);
|
||||
if (!circ->buf) {
|
||||
uart_port_unlock(port, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (port && uart_circ_chars_free(circ) != 0) {
|
||||
circ->buf[circ->head] = c;
|
||||
circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
|
||||
@@ -599,11 +601,13 @@ static int uart_write(struct tty_struct *tty,
|
||||
return -EL3HLT;
|
||||
}
|
||||
|
||||
circ = &state->xmit;
|
||||
if (!circ->buf)
|
||||
return 0;
|
||||
|
||||
port = uart_port_lock(state, flags);
|
||||
circ = &state->xmit;
|
||||
if (!circ->buf) {
|
||||
uart_port_unlock(port, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
while (port) {
|
||||
c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
|
||||
if (count < c)
|
||||
|
||||
@@ -2180,7 +2180,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
|
||||
ld = tty_ldisc_ref_wait(tty);
|
||||
if (!ld)
|
||||
return -EIO;
|
||||
ld->ops->receive_buf(tty, &ch, &mbz, 1);
|
||||
if (ld->ops->receive_buf)
|
||||
ld->ops->receive_buf(tty, &ch, &mbz, 1);
|
||||
tty_ldisc_deref(ld);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -953,6 +953,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
|
||||
if (con_is_visible(vc))
|
||||
update_screen(vc);
|
||||
vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
|
||||
notify_update(vc);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
@@ -182,6 +182,8 @@ void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
|
||||
req->started = false;
|
||||
list_del(&req->list);
|
||||
req->remaining = 0;
|
||||
req->unaligned = false;
|
||||
req->zero = false;
|
||||
|
||||
if (req->request.status == -EINPROGRESS)
|
||||
req->request.status = status;
|
||||
|
||||
@@ -724,14 +724,16 @@ static int xhci_mtk_remove(struct platform_device *dev)
|
||||
struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev);
|
||||
struct usb_hcd *hcd = mtk->hcd;
|
||||
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
|
||||
struct usb_hcd *shared_hcd = xhci->shared_hcd;
|
||||
|
||||
usb_remove_hcd(xhci->shared_hcd);
|
||||
usb_remove_hcd(shared_hcd);
|
||||
xhci->shared_hcd = NULL;
|
||||
xhci_mtk_phy_power_off(mtk);
|
||||
xhci_mtk_phy_exit(mtk);
|
||||
device_init_wakeup(&dev->dev, false);
|
||||
|
||||
usb_remove_hcd(hcd);
|
||||
usb_put_hcd(xhci->shared_hcd);
|
||||
usb_put_hcd(shared_hcd);
|
||||
usb_put_hcd(hcd);
|
||||
xhci_mtk_sch_exit(mtk);
|
||||
xhci_mtk_clks_disable(mtk);
|
||||
|
||||
@@ -370,6 +370,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
|
||||
if (xhci->shared_hcd) {
|
||||
usb_remove_hcd(xhci->shared_hcd);
|
||||
usb_put_hcd(xhci->shared_hcd);
|
||||
xhci->shared_hcd = NULL;
|
||||
}
|
||||
|
||||
/* Workaround for spurious wakeups at shutdown with HSW */
|
||||
|
||||
@@ -332,14 +332,16 @@ static int xhci_plat_remove(struct platform_device *dev)
|
||||
struct usb_hcd *hcd = platform_get_drvdata(dev);
|
||||
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
|
||||
struct clk *clk = xhci->clk;
|
||||
struct usb_hcd *shared_hcd = xhci->shared_hcd;
|
||||
|
||||
xhci->xhc_state |= XHCI_STATE_REMOVING;
|
||||
|
||||
usb_remove_hcd(xhci->shared_hcd);
|
||||
usb_remove_hcd(shared_hcd);
|
||||
xhci->shared_hcd = NULL;
|
||||
usb_phy_shutdown(hcd->usb_phy);
|
||||
|
||||
usb_remove_hcd(hcd);
|
||||
usb_put_hcd(xhci->shared_hcd);
|
||||
usb_put_hcd(shared_hcd);
|
||||
|
||||
if (!IS_ERR(clk))
|
||||
clk_disable_unprepare(clk);
|
||||
|
||||
@@ -1178,6 +1178,7 @@ static int tegra_xusb_remove(struct platform_device *pdev)
|
||||
|
||||
usb_remove_hcd(xhci->shared_hcd);
|
||||
usb_put_hcd(xhci->shared_hcd);
|
||||
xhci->shared_hcd = NULL;
|
||||
usb_remove_hcd(tegra->hcd);
|
||||
usb_put_hcd(tegra->hcd);
|
||||
|
||||
|
||||
@@ -669,8 +669,6 @@ static void xhci_stop(struct usb_hcd *hcd)
|
||||
|
||||
/* Only halt host and free memory after both hcds are removed */
|
||||
if (!usb_hcd_is_primary_hcd(hcd)) {
|
||||
/* usb core will free this hcd shortly, unset pointer */
|
||||
xhci->shared_hcd = NULL;
|
||||
mutex_unlock(&xhci->mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
|
||||
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
|
||||
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
|
||||
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
|
||||
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
|
||||
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
|
||||
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
|
||||
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
|
||||
#define PL2303_VENDOR_ID 0x067b
|
||||
#define PL2303_PRODUCT_ID 0x2303
|
||||
#define PL2303_PRODUCT_ID_TB 0x2304
|
||||
#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
|
||||
#define PL2303_PRODUCT_ID_DCU11 0x1234
|
||||
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
|
||||
@@ -25,6 +26,7 @@
|
||||
#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
|
||||
#define PL2303_PRODUCT_ID_ZTEK 0xe1f1
|
||||
|
||||
|
||||
#define ATEN_VENDOR_ID 0x0557
|
||||
#define ATEN_VENDOR_ID2 0x0547
|
||||
#define ATEN_PRODUCT_ID 0x2008
|
||||
|
||||
@@ -88,7 +88,8 @@ DEVICE(moto_modem, MOTO_IDS);
|
||||
/* Motorola Tetra driver */
|
||||
#define MOTOROLA_TETRA_IDS() \
|
||||
{ USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
|
||||
{ USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
|
||||
{ USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
|
||||
{ USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
|
||||
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
|
||||
|
||||
/* Novatel Wireless GPS driver */
|
||||
|
||||
@@ -851,7 +851,8 @@ static void handle_rx(struct vhost_net *net)
|
||||
vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
|
||||
headcount);
|
||||
if (unlikely(vq_log))
|
||||
vhost_log_write(vq, vq_log, log, vhost_len);
|
||||
vhost_log_write(vq, vq_log, log, vhost_len,
|
||||
vq->iov, in);
|
||||
total_len += vhost_len;
|
||||
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
|
||||
vhost_poll_queue(&vq->poll);
|
||||
|
||||
@@ -1726,13 +1726,87 @@ static int log_write(void __user *log_base,
return r;
}

static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
struct vhost_umem *umem = vq->umem;
struct vhost_umem_node *u;
u64 start, end, l, min;
int r;
bool hit = false;

while (len) {
min = len;
/* More than one GPAs can be mapped into a single HVA. So
* iterate all possible umems here to be safe.
*/
list_for_each_entry(u, &umem->umem_list, link) {
if (u->userspace_addr > hva - 1 + len ||
u->userspace_addr - 1 + u->size < hva)
continue;
start = max(u->userspace_addr, hva);
end = min(u->userspace_addr - 1 + u->size,
hva - 1 + len);
l = end - start + 1;
r = log_write(vq->log_base,
u->start + start - u->userspace_addr,
l);
if (r < 0)
return r;
hit = true;
min = min(l, min);
}

if (!hit)
return -EFAULT;

len -= min;
hva += min;
}

return 0;
}

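Editor's note: log_write_hva() above maps a host-virtual range back to guest-side log offsets by intersecting it with each registered memory region. A worked, illustrative-only example of that interval arithmetic (the addresses are invented, and taking u->start as the region's guest-side start address is an assumption of this sketch):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* region maps userspace [0x7f0000001000, +0x3000) to guest address 0x40000000 */
                uint64_t region_uaddr = 0x7f0000001000ULL, region_size = 0x3000;
                uint64_t region_guest_start = 0x40000000ULL;
                /* dirty range to log: hva = 0x7f0000002800, len = 0x1000 */
                uint64_t hva = 0x7f0000002800ULL, len = 0x1000;

                uint64_t start = hva > region_uaddr ? hva : region_uaddr;
                uint64_t end_region = region_uaddr - 1 + region_size;
                uint64_t end_range  = hva - 1 + len;
                uint64_t end = end_region < end_range ? end_region : end_range;
                uint64_t l = end - start + 1;
                uint64_t guest = region_guest_start + start - region_uaddr;

                /* prints: log 4096 bytes at guest address 0x40001800 */
                printf("log %llu bytes at guest address 0x%llx\n",
                       (unsigned long long)l, (unsigned long long)guest);
                return 0;
        }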
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
|
||||
{
|
||||
struct iovec iov[64];
|
||||
int i, ret;
|
||||
|
||||
if (!vq->iotlb)
|
||||
return log_write(vq->log_base, vq->log_addr + used_offset, len);
|
||||
|
||||
ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
|
||||
len, iov, 64, VHOST_ACCESS_WO);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
for (i = 0; i < ret; i++) {
|
||||
ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
|
||||
iov[i].iov_len);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
|
||||
unsigned int log_num, u64 len)
|
||||
unsigned int log_num, u64 len, struct iovec *iov, int count)
|
||||
{
|
||||
int i, r;
|
||||
|
||||
/* Make sure data written is seen before log. */
|
||||
smp_wmb();
|
||||
|
||||
if (vq->iotlb) {
|
||||
for (i = 0; i < count; i++) {
|
||||
r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
|
||||
iov[i].iov_len);
|
||||
if (r < 0)
|
||||
return r;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < log_num; ++i) {
|
||||
u64 l = min(log[i].len, len);
|
||||
r = log_write(vq->log_base, log[i].addr, l);
|
||||
@@ -1762,9 +1836,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
|
||||
smp_wmb();
|
||||
/* Log used flag write. */
|
||||
used = &vq->used->flags;
|
||||
log_write(vq->log_base, vq->log_addr +
|
||||
(used - (void __user *)vq->used),
|
||||
sizeof vq->used->flags);
|
||||
log_used(vq, (used - (void __user *)vq->used),
|
||||
sizeof vq->used->flags);
|
||||
if (vq->log_ctx)
|
||||
eventfd_signal(vq->log_ctx, 1);
|
||||
}
|
||||
@@ -1782,9 +1855,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
|
||||
smp_wmb();
|
||||
/* Log avail event write */
|
||||
used = vhost_avail_event(vq);
|
||||
log_write(vq->log_base, vq->log_addr +
|
||||
(used - (void __user *)vq->used),
|
||||
sizeof *vhost_avail_event(vq));
|
||||
log_used(vq, (used - (void __user *)vq->used),
|
||||
sizeof *vhost_avail_event(vq));
|
||||
if (vq->log_ctx)
|
||||
eventfd_signal(vq->log_ctx, 1);
|
||||
}
|
||||
@@ -2189,10 +2261,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
|
||||
/* Make sure data is seen before log. */
|
||||
smp_wmb();
|
||||
/* Log used ring entry write. */
|
||||
log_write(vq->log_base,
|
||||
vq->log_addr +
|
||||
((void __user *)used - (void __user *)vq->used),
|
||||
count * sizeof *used);
|
||||
log_used(vq, ((void __user *)used - (void __user *)vq->used),
|
||||
count * sizeof *used);
|
||||
}
|
||||
old = vq->last_used_idx;
|
||||
new = (vq->last_used_idx += count);
|
||||
@@ -2234,9 +2304,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
|
||||
/* Make sure used idx is seen before log. */
|
||||
smp_wmb();
|
||||
/* Log used index update. */
|
||||
log_write(vq->log_base,
|
||||
vq->log_addr + offsetof(struct vring_used, idx),
|
||||
sizeof vq->used->idx);
|
||||
log_used(vq, offsetof(struct vring_used, idx),
|
||||
sizeof vq->used->idx);
|
||||
if (vq->log_ctx)
|
||||
eventfd_signal(vq->log_ctx, 1);
|
||||
}
|
||||
|
||||
@@ -208,7 +208,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
|
||||
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
|
||||
|
||||
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
|
||||
unsigned int log_num, u64 len);
|
||||
unsigned int log_num, u64 len,
|
||||
struct iovec *iov, int count);
|
||||
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
|
||||
|
||||
struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
|
||||
|
||||
@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
|
||||
xen_have_vector_callback = 0;
|
||||
return;
|
||||
}
|
||||
pr_info("Xen HVM callback vector for event delivery is enabled\n");
|
||||
pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
|
||||
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
|
||||
xen_hvm_callback_vector);
|
||||
}
|
||||
|
||||
@@ -351,6 +351,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
|
||||
break;
|
||||
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
|
||||
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
|
||||
ASSERT(0);
|
||||
ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
|
||||
goto leave;
|
||||
}
|
||||
@@ -395,6 +396,10 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
|
||||
if (IS_ERR(trans)) {
|
||||
ret = PTR_ERR(trans);
|
||||
btrfs_dev_replace_lock(dev_replace, 1);
|
||||
dev_replace->replace_state =
|
||||
BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
|
||||
dev_replace->srcdev = NULL;
|
||||
dev_replace->tgtdev = NULL;
|
||||
goto leave;
|
||||
}
|
||||
|
||||
@@ -416,8 +421,6 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
|
||||
return ret;
|
||||
|
||||
leave:
|
||||
dev_replace->srcdev = NULL;
|
||||
dev_replace->tgtdev = NULL;
|
||||
btrfs_dev_replace_unlock(dev_replace, 1);
|
||||
btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
|
||||
return ret;
|
||||
@@ -801,6 +804,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
|
||||
"cannot continue dev_replace, tgtdev is missing");
|
||||
btrfs_info(fs_info,
|
||||
"you may cancel the operation after 'mount -o degraded'");
|
||||
dev_replace->replace_state =
|
||||
BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
|
||||
btrfs_dev_replace_unlock(dev_replace, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1445,18 +1445,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
|
||||
}
|
||||
|
||||
static int
|
||||
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
||||
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
|
||||
bool malformed)
|
||||
{
|
||||
int length;
|
||||
struct cifs_readdata *rdata = mid->callback_data;
|
||||
|
||||
length = cifs_discard_remaining_data(server);
|
||||
dequeue_mid(mid, rdata->result);
|
||||
dequeue_mid(mid, malformed);
|
||||
mid->resp_buf = server->smallbuf;
|
||||
server->smallbuf = NULL;
|
||||
return length;
|
||||
}
|
||||
|
||||
static int
|
||||
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
||||
{
|
||||
struct cifs_readdata *rdata = mid->callback_data;
|
||||
|
||||
return __cifs_readv_discard(server, mid, rdata->result);
|
||||
}
|
||||
|
||||
int
|
||||
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
||||
{
|
||||
@@ -1496,12 +1504,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* set up first two iov for signature check and to get credits */
|
||||
rdata->iov[0].iov_base = buf;
|
||||
rdata->iov[0].iov_len = 4;
|
||||
rdata->iov[1].iov_base = buf + 4;
|
||||
rdata->iov[1].iov_len = server->total_read - 4;
|
||||
cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
|
||||
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
|
||||
cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
|
||||
rdata->iov[1].iov_base, rdata->iov[1].iov_len);
|
||||
|
||||
/* Was the SMB read successful? */
|
||||
rdata->result = server->ops->map_error(buf, false);
|
||||
if (rdata->result != 0) {
|
||||
cifs_dbg(FYI, "%s: server returned error %d\n",
|
||||
__func__, rdata->result);
|
||||
return cifs_readv_discard(server, mid);
|
||||
/* normal error on read response */
|
||||
return __cifs_readv_discard(server, mid, false);
|
||||
}
|
||||
|
||||
/* Is there enough to get to the rest of the READ_RSP header? */
|
||||
@@ -1544,14 +1563,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
||||
server->total_read += length;
|
||||
}
|
||||
|
||||
/* set up first iov for signature check */
|
||||
rdata->iov[0].iov_base = buf;
|
||||
rdata->iov[0].iov_len = 4;
|
||||
rdata->iov[1].iov_base = buf + 4;
|
||||
rdata->iov[1].iov_len = server->total_read - 4;
|
||||
cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
|
||||
rdata->iov[0].iov_base, server->total_read);
|
||||
|
||||
/* how much data is in the response? */
|
||||
data_len = server->ops->read_data_length(buf);
|
||||
if (data_offset + data_len > buflen) {
|
||||
|
||||
@@ -524,6 +524,21 @@ server_unresponsive(struct TCP_Server_Info *server)
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
zero_credits(struct TCP_Server_Info *server)
|
||||
{
|
||||
int val;
|
||||
|
||||
spin_lock(&server->req_lock);
|
||||
val = server->credits + server->echo_credits + server->oplock_credits;
|
||||
if (server->in_flight == 0 && val == 0) {
|
||||
spin_unlock(&server->req_lock);
|
||||
return true;
|
||||
}
|
||||
spin_unlock(&server->req_lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
static int
|
||||
cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
|
||||
{
|
||||
@@ -536,6 +551,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
|
||||
for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
|
||||
try_to_freeze();
|
||||
|
||||
/* reconnect if no credits and no requests in flight */
|
||||
if (zero_credits(server)) {
|
||||
cifs_reconnect(server);
|
||||
return -ECONNABORTED;
|
||||
}
|
||||
|
||||
if (server_unresponsive(server))
|
||||
return -ECONNABORTED;
|
||||
|
||||
|
||||
@@ -33,6 +33,7 @@
|
||||
#include "smb2glob.h"
|
||||
#include "cifs_ioctl.h"
|
||||
|
||||
/* Change credits for different ops and return the total number of credits */
|
||||
static int
|
||||
change_conf(struct TCP_Server_Info *server)
|
||||
{
|
||||
@@ -40,17 +41,15 @@ change_conf(struct TCP_Server_Info *server)
|
||||
server->oplock_credits = server->echo_credits = 0;
|
||||
switch (server->credits) {
|
||||
case 0:
|
||||
return -1;
|
||||
return 0;
|
||||
case 1:
|
||||
server->echoes = false;
|
||||
server->oplocks = false;
|
||||
cifs_dbg(VFS, "disabling echoes and oplocks\n");
|
||||
break;
|
||||
case 2:
|
||||
server->echoes = true;
|
||||
server->oplocks = false;
|
||||
server->echo_credits = 1;
|
||||
cifs_dbg(FYI, "disabling oplocks\n");
|
||||
break;
|
||||
default:
|
||||
server->echoes = true;
|
||||
@@ -63,14 +62,15 @@ change_conf(struct TCP_Server_Info *server)
|
||||
server->echo_credits = 1;
|
||||
}
|
||||
server->credits -= server->echo_credits + server->oplock_credits;
|
||||
return 0;
|
||||
return server->credits + server->echo_credits + server->oplock_credits;
|
||||
}
|
||||
|
||||
static void
|
||||
smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
|
||||
const int optype)
|
||||
{
|
||||
int *val, rc = 0;
|
||||
int *val, rc = -1;
|
||||
|
||||
spin_lock(&server->req_lock);
|
||||
val = server->ops->get_credits_field(server, optype);
|
||||
*val += add;
|
||||
@@ -94,8 +94,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
|
||||
}
|
||||
spin_unlock(&server->req_lock);
|
||||
wake_up(&server->request_q);
|
||||
if (rc)
|
||||
cifs_reconnect(server);
|
||||
|
||||
if (server->tcpStatus == CifsNeedReconnect)
|
||||
return;
|
||||
|
||||
switch (rc) {
|
||||
case -1:
|
||||
/* change_conf hasn't been executed */
|
||||
break;
|
||||
case 0:
|
||||
cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
|
||||
break;
|
||||
case 1:
|
||||
cifs_dbg(VFS, "disabling echoes and oplocks\n");
|
||||
break;
|
||||
case 2:
|
||||
cifs_dbg(FYI, "disabling oplocks\n");
|
||||
break;
|
||||
default:
|
||||
cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -153,14 +171,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
|
||||
|
||||
scredits = server->credits;
|
||||
/* can deadlock with reopen */
|
||||
if (scredits == 1) {
|
||||
if (scredits <= 8) {
|
||||
*num = SMB2_MAX_BUFFER_SIZE;
|
||||
*credits = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
/* leave one credit for a possible reopen */
|
||||
scredits--;
|
||||
/* leave some credits for reopen and other ops */
|
||||
scredits -= 8;
|
||||
*num = min_t(unsigned int, size,
|
||||
scredits * SMB2_MAX_BUFFER_SIZE);
|
||||
|
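Editor's note: the smb2_wait_mtu_credits() change above reserves 8 credits instead of 1 before sizing an MTU read or write. A hedged arithmetic sketch of the effect, assuming SMB2_MAX_BUFFER_SIZE is 65536; the per-64KiB credit rounding below is an illustrative assumption, not the fs/cifs code:

        #include <stdio.h>

        #define SMB2_MAX_BUFFER_SIZE 65536u

        int main(void)
        {
                unsigned int scredits = 20, size = 4 * 1024 * 1024, num, credits;

                if (scredits <= 8) {            /* too few credits: fall back to one small buffer */
                        num = SMB2_MAX_BUFFER_SIZE;
                        credits = 0;
                } else {
                        scredits -= 8;          /* leave some credits for reopen and other ops */
                        num = size < scredits * SMB2_MAX_BUFFER_SIZE ?
                              size : scredits * SMB2_MAX_BUFFER_SIZE;
                        credits = (num + SMB2_MAX_BUFFER_SIZE - 1) / SMB2_MAX_BUFFER_SIZE;
                }
                /* prints: request 786432 bytes using 12 credits */
                printf("request %u bytes using %u credits\n", num, credits);
                return 0;
        }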
||||
@@ -2531,11 +2549,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
|
||||
server->ops->is_status_pending(buf, server, 0))
|
||||
return -1;
|
||||
|
||||
rdata->result = server->ops->map_error(buf, false);
|
||||
/* set up first two iov to get credits */
|
||||
rdata->iov[0].iov_base = buf;
|
||||
rdata->iov[0].iov_len = 4;
|
||||
rdata->iov[1].iov_base = buf + 4;
|
||||
rdata->iov[1].iov_len =
|
||||
min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
|
||||
cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
|
||||
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
|
||||
cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
|
||||
rdata->iov[1].iov_base, rdata->iov[1].iov_len);
|
||||
|
||||
rdata->result = server->ops->map_error(buf, true);
|
||||
if (rdata->result != 0) {
|
||||
cifs_dbg(FYI, "%s: server returned error %d\n",
|
||||
__func__, rdata->result);
|
||||
dequeue_mid(mid, rdata->result);
|
||||
/* normal error on read response */
|
||||
dequeue_mid(mid, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2605,14 +2635,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* set up first iov for signature check */
|
||||
rdata->iov[0].iov_base = buf;
|
||||
rdata->iov[0].iov_len = 4;
|
||||
rdata->iov[1].iov_base = buf + 4;
|
||||
rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
|
||||
cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
|
||||
rdata->iov[0].iov_base, server->vals->read_rsp_size);
|
||||
|
||||
length = rdata->copy_into_pages(server, rdata, &iter);
|
||||
|
||||
kfree(bvec);
|
||||
|
||||
@@ -733,6 +733,7 @@ static void truncate_node(struct dnode_of_data *dn)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
|
||||
struct node_info ni;
|
||||
pgoff_t index;
|
||||
|
||||
get_node_info(sbi, dn->nid, &ni);
|
||||
|
||||
@@ -750,10 +751,11 @@ static void truncate_node(struct dnode_of_data *dn)
|
||||
clear_node_page_dirty(dn->node_page);
|
||||
set_sbi_flag(sbi, SBI_IS_DIRTY);
|
||||
|
||||
index = dn->node_page->index;
|
||||
f2fs_put_page(dn->node_page, 1);
|
||||
|
||||
invalidate_mapping_pages(NODE_MAPPING(sbi),
|
||||
dn->node_page->index, dn->node_page->index);
|
||||
index, index);
|
||||
|
||||
dn->node_page = NULL;
|
||||
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
|
||||
|
||||
@@ -33,3 +33,17 @@
#define __nocfi __attribute__((no_sanitize("cfi")))
#endif

/*
* Not all versions of clang implement the the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
* __has_builtin allowing us to avoid awkward version
* checks. Unfortunately, we don't know which version of gcc clang
* pretends to be, so the macro may or may not be defined.
*/
#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
#if __has_builtin(__builtin_mul_overflow) && \
__has_builtin(__builtin_add_overflow) && \
__has_builtin(__builtin_sub_overflow)
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif

@@ -358,3 +358,7 @@
* code
*/
#define uninitialized_var(x) x = x

#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif

@@ -44,3 +44,7 @@
#define __builtin_bswap16 _bswap16
#endif

/*
* icc defines __GNUC__, but does not implement the builtin overflow checkers.
*/
#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW

@@ -1130,8 +1130,9 @@ struct hv_ring_buffer_debug_info {
|
||||
u32 bytes_avail_towrite;
|
||||
};
|
||||
|
||||
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
||||
struct hv_ring_buffer_debug_info *debug_info);
|
||||
|
||||
int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
|
||||
struct hv_ring_buffer_debug_info *debug_info);
|
||||
|
||||
/* Vmbus interface */
|
||||
#define vmbus_driver_register(driver) \
|
||||
|
||||
include/linux/overflow.h (new file, 205 lines)
@@ -0,0 +1,205 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
|
||||
#ifndef __LINUX_OVERFLOW_H
|
||||
#define __LINUX_OVERFLOW_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/*
|
||||
* In the fallback code below, we need to compute the minimum and
|
||||
* maximum values representable in a given type. These macros may also
|
||||
* be useful elsewhere, so we provide them outside the
|
||||
* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
|
||||
*
|
||||
* It would seem more obvious to do something like
|
||||
*
|
||||
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
|
||||
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
|
||||
*
|
||||
* Unfortunately, the middle expressions, strictly speaking, have
|
||||
* undefined behaviour, and at least some versions of gcc warn about
|
||||
* the type_max expression (but not if -fsanitize=undefined is in
|
||||
* effect; in that case, the warning is deferred to runtime...).
|
||||
*
|
||||
* The slightly excessive casting in type_min is to make sure the
|
||||
* macros also produce sensible values for the exotic type _Bool. [The
|
||||
* overflow checkers only almost work for _Bool, but that's
|
||||
* a-feature-not-a-bug, since people shouldn't be doing arithmetic on
|
||||
* _Bools. Besides, the gcc builtins don't allow _Bool* as third
|
||||
* argument.]
|
||||
*
|
||||
* Idea stolen from
|
||||
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
|
||||
* credit to Christian Biere.
|
||||
*/
|
||||
#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
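Editor's note: a worked example (not part of the patch) of what these macros evaluate to for 8-bit types; the shift amount drops one extra bit when the type is signed:

        #include <stdio.h>

        #define is_signed_type(type) (((type)(-1)) < (type)1)
        #define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
        #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
        #define type_min(T) ((T)((T)-type_max(T)-(T)1))

        int main(void)
        {
                /* signed char:   half max = 1 << 6 = 64,  max = 63 + 64 = 127,  min = -128 */
                printf("signed char:   max=%d min=%d\n",
                       (int)type_max(signed char), (int)type_min(signed char));
                /* unsigned char: half max = 1 << 7 = 128, max = 127 + 128 = 255, min = 0 */
                printf("unsigned char: max=%d min=%d\n",
                       (int)type_max(unsigned char), (int)type_min(unsigned char));
                return 0;
        }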
|
||||
|
||||
#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
|
||||
/*
|
||||
* For simplicity and code hygiene, the fallback code below insists on
|
||||
* a, b and *d having the same type (similar to the min() and max()
|
||||
* macros), whereas gcc's type-generic overflow checkers accept
|
||||
* different types. Hence we don't just make check_add_overflow an
|
||||
* alias for __builtin_add_overflow, but add type checks similar to
|
||||
* below.
|
||||
*/
|
||||
#define check_add_overflow(a, b, d) ({ \
|
||||
typeof(a) __a = (a); \
|
||||
typeof(b) __b = (b); \
|
||||
typeof(d) __d = (d); \
|
||||
(void) (&__a == &__b); \
|
||||
(void) (&__a == __d); \
|
||||
__builtin_add_overflow(__a, __b, __d); \
|
||||
})
|
||||
|
||||
#define check_sub_overflow(a, b, d) ({ \
|
||||
typeof(a) __a = (a); \
|
||||
typeof(b) __b = (b); \
|
||||
typeof(d) __d = (d); \
|
||||
(void) (&__a == &__b); \
|
||||
(void) (&__a == __d); \
|
||||
__builtin_sub_overflow(__a, __b, __d); \
|
||||
})
|
||||
|
||||
#define check_mul_overflow(a, b, d) ({ \
|
||||
typeof(a) __a = (a); \
|
||||
typeof(b) __b = (b); \
|
||||
typeof(d) __d = (d); \
|
||||
(void) (&__a == &__b); \
|
||||
(void) (&__a == __d); \
|
||||
__builtin_mul_overflow(__a, __b, __d); \
|
||||
})
|
||||
|
||||
#else
|
||||
|
||||
|
||||
/* Checking for unsigned overflow is relatively easy without causing UB. */
|
||||
#define __unsigned_add_overflow(a, b, d) ({ \
|
||||
typeof(a) __a = (a); \
|
||||
typeof(b) __b = (b); \
|
||||
typeof(d) __d = (d); \
|
||||
(void) (&__a == &__b); \
|
||||
(void) (&__a == __d); \
|
||||
*__d = __a + __b; \
|
||||
*__d < __a; \
|
||||
})
|
||||
#define __unsigned_sub_overflow(a, b, d) ({ \
|
||||
typeof(a) __a = (a); \
|
||||
typeof(b) __b = (b); \
|
||||
typeof(d) __d = (d); \
|
||||
(void) (&__a == &__b); \
|
||||
(void) (&__a == __d); \
|
||||
*__d = __a - __b; \
|
||||
__a < __b; \
|
||||
})
|
||||
/*
|
||||
* If one of a or b is a compile-time constant, this avoids a division.
|
||||
*/
|
||||
#define __unsigned_mul_overflow(a, b, d) ({ \
|
||||
typeof(a) __a = (a); \
|
||||
typeof(b) __b = (b); \
|
||||
typeof(d) __d = (d); \
|
||||
(void) (&__a == &__b); \
|
||||
(void) (&__a == __d); \
|
||||
*__d = __a * __b; \
|
||||
__builtin_constant_p(__b) ? \
|
||||
__b > 0 && __a > type_max(typeof(__a)) / __b : \
|
||||
__a > 0 && __b > type_max(typeof(__b)) / __a; \
|
||||
})
|
||||
|
||||
/*
|
||||
* For signed types, detecting overflow is much harder, especially if
|
||||
* we want to avoid UB. But the interface of these macros is such that
|
||||
* we must provide a result in *d, and in fact we must produce the
|
||||
* result promised by gcc's builtins, which is simply the possibly
|
||||
* wrapped-around value. Fortunately, we can just formally do the
|
||||
* operations in the widest relevant unsigned type (u64) and then
|
||||
* truncate the result - gcc is smart enough to generate the same code
|
||||
* with and without the (u64) casts.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Adding two signed integers can overflow only if they have the same
|
||||
* sign, and overflow has happened iff the result has the opposite
|
||||
* sign.
|
||||
*/
|
||||
#define __signed_add_overflow(a, b, d) ({ \
|
||||
typeof(a) __a = (a); \
|
||||
typeof(b) __b = (b); \
|
||||
typeof(d) __d = (d); \
|
||||
(void) (&__a == &__b); \
|
||||
(void) (&__a == __d); \
|
||||
*__d = (u64)__a + (u64)__b; \
|
||||
(((~(__a ^ __b)) & (*__d ^ __a)) \
|
||||
& type_min(typeof(__a))) != 0; \
|
||||
})
|
||||
|
||||
/*
|
||||
* Subtraction is similar, except that overflow can now happen only
|
||||
* when the signs are opposite. In this case, overflow has happened if
|
||||
* the result has the opposite sign of a.
|
||||
*/
|
||||
#define __signed_sub_overflow(a, b, d) ({ \
|
||||
typeof(a) __a = (a); \
|
||||
typeof(b) __b = (b); \
|
||||
typeof(d) __d = (d); \
|
||||
(void) (&__a == &__b); \
|
||||
(void) (&__a == __d); \
|
||||
*__d = (u64)__a - (u64)__b; \
|
||||
((((__a ^ __b)) & (*__d ^ __a)) \
|
||||
& type_min(typeof(__a))) != 0; \
|
||||
})
|
||||
|
||||
/*
|
||||
* Signed multiplication is rather hard. gcc always follows C99, so
|
||||
* division is truncated towards 0. This means that we can write the
|
||||
* overflow check like this:
|
||||
*
|
||||
* (a > 0 && (b > MAX/a || b < MIN/a)) ||
|
||||
* (a < -1 && (b > MIN/a || b < MAX/a) ||
|
||||
* (a == -1 && b == MIN)
|
||||
*
|
||||
* The redundant casts of -1 are to silence an annoying -Wtype-limits
|
||||
* (included in -Wextra) warning: When the type is u8 or u16, the
|
||||
* __b_c_e in check_mul_overflow obviously selects
|
||||
* __unsigned_mul_overflow, but unfortunately gcc still parses this
|
||||
* code and warns about the limited range of __b.
|
||||
*/
|
||||
|
||||
#define __signed_mul_overflow(a, b, d) ({ \
|
||||
typeof(a) __a = (a); \
|
||||
typeof(b) __b = (b); \
|
||||
typeof(d) __d = (d); \
|
||||
typeof(a) __tmax = type_max(typeof(a)); \
|
||||
typeof(a) __tmin = type_min(typeof(a)); \
|
||||
(void) (&__a == &__b); \
|
||||
(void) (&__a == __d); \
|
||||
*__d = (u64)__a * (u64)__b; \
|
||||
(__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
|
||||
(__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
|
||||
(__b == (typeof(__b))-1 && __a == __tmin); \
|
||||
})
|
||||
|
||||
|
||||
#define check_add_overflow(a, b, d) \
|
||||
__builtin_choose_expr(is_signed_type(typeof(a)), \
|
||||
__signed_add_overflow(a, b, d), \
|
||||
__unsigned_add_overflow(a, b, d))
|
||||
|
||||
#define check_sub_overflow(a, b, d) \
|
||||
__builtin_choose_expr(is_signed_type(typeof(a)), \
|
||||
__signed_sub_overflow(a, b, d), \
|
||||
__unsigned_sub_overflow(a, b, d))
|
||||
|
||||
#define check_mul_overflow(a, b, d) \
|
||||
__builtin_choose_expr(is_signed_type(typeof(a)), \
|
||||
__signed_mul_overflow(a, b, d), \
|
||||
__unsigned_mul_overflow(a, b, d))
|
||||
|
||||
|
||||
#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
|
||||
|
||||
#endif /* __LINUX_OVERFLOW_H */
|
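Editor's note: a minimal usage sketch of the interface this new header exports; check_*_overflow() evaluates to true when the operation overflows, and the (wrapped) result is still stored in the destination. In kernel code this would go through #include <linux/overflow.h>; the sketch below calls the GCC/clang builtins that back those macros directly so it compiles as a plain userspace program, and the allocation-size helper is an invented example, not a kernel API.

        #include <stddef.h>
        #include <stdio.h>

        static int array_bytes(size_t n, size_t elem_size, size_t *out)
        {
                size_t bytes;

                if (__builtin_mul_overflow(n, elem_size, &bytes))
                        return -1;              /* n * elem_size wrapped around */
                *out = bytes;
                return 0;
        }

        int main(void)
        {
                size_t bytes;

                printf("16 * 32        -> %d\n", array_bytes(16, 32, &bytes));            /* ok */
                printf("(SIZE_MAX/2)*4 -> %d\n", array_bytes((size_t)-1 / 2, 4, &bytes)); /* overflow */
                return 0;
        }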
||||
@@ -3163,6 +3163,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
|
||||
*
|
||||
* This is exactly the same as pskb_trim except that it ensures the
|
||||
* checksum of received packets are still valid after the operation.
|
||||
* It can change skb pointers.
|
||||
*/
|
||||
|
||||
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
|
||||
|
||||
@@ -233,7 +233,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
|
||||
struct netlink_ext_ack *extack);
|
||||
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
|
||||
struct netlink_callback *cb);
|
||||
int fib_table_flush(struct net *net, struct fib_table *table);
|
||||
int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
|
||||
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
|
||||
void fib_table_flush_external(struct fib_table *table);
|
||||
void fib_free_table(struct fib_table *tb);
|
||||
|
||||
@@ -178,4 +178,46 @@ DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
|
||||
|
||||
/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
|
||||
#define VCPUOP_send_nmi 11
|
||||
|
||||
/*
|
||||
* Get the physical ID information for a pinned vcpu's underlying physical
|
||||
* processor. The physical ID informmation is architecture-specific.
|
||||
* On x86: id[31:0]=apic_id, id[63:32]=acpi_id.
|
||||
* This command returns -EINVAL if it is not a valid operation for this VCPU.
|
||||
*/
|
||||
#define VCPUOP_get_physid 12 /* arg == vcpu_get_physid_t */
|
||||
struct vcpu_get_physid {
|
||||
uint64_t phys_id;
|
||||
};
|
||||
DEFINE_GUEST_HANDLE_STRUCT(vcpu_get_physid);
|
||||
#define xen_vcpu_physid_to_x86_apicid(physid) ((uint32_t)(physid))
|
||||
#define xen_vcpu_physid_to_x86_acpiid(physid) ((uint32_t)((physid) >> 32))
|
||||
|
||||
/*
|
||||
* Register a memory location to get a secondary copy of the vcpu time
|
||||
* parameters. The master copy still exists as part of the vcpu shared
|
||||
* memory area, and this secondary copy is updated whenever the master copy
|
||||
* is updated (and using the same versioning scheme for synchronisation).
|
||||
*
|
||||
* The intent is that this copy may be mapped (RO) into userspace so
|
||||
* that usermode can compute system time using the time info and the
|
||||
* tsc. Usermode will see an array of vcpu_time_info structures, one
|
||||
* for each vcpu, and choose the right one by an existing mechanism
|
||||
* which allows it to get the current vcpu number (such as via a
|
||||
* segment limit). It can then apply the normal algorithm to compute
|
||||
* system time from the tsc.
|
||||
*
|
||||
* @extra_arg == pointer to vcpu_register_time_info_memory_area structure.
|
||||
*/
|
||||
#define VCPUOP_register_vcpu_time_memory_area 13
|
||||
DEFINE_GUEST_HANDLE_STRUCT(vcpu_time_info);
|
||||
struct vcpu_register_time_memory_area {
|
||||
union {
|
||||
GUEST_HANDLE(vcpu_time_info) h;
|
||||
struct pvclock_vcpu_time_info *v;
|
||||
uint64_t p;
|
||||
} addr;
|
||||
};
|
||||
DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_time_memory_area);
|
||||
|
||||
#endif /* __XEN_PUBLIC_VCPU_H__ */
|
||||
|
||||
@@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
|
||||
* set up the signal and overrun bookkeeping.
|
||||
*/
|
||||
timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
|
||||
timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
|
||||
|
||||
/*
|
||||
* This acts as a modification timestamp for the timer,
|
||||
|
||||
@@ -35,10 +35,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
|
||||
|
||||
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
skb_push(skb, ETH_HLEN);
|
||||
if (!is_skb_forwardable(skb->dev, skb))
|
||||
goto drop;
|
||||
|
||||
skb_push(skb, ETH_HLEN);
|
||||
br_drop_fake_rtable(skb);
|
||||
|
||||
if (skb->ip_summed == CHECKSUM_PARTIAL &&
|
||||
@@ -96,12 +96,11 @@ static void __br_forward(const struct net_bridge_port *to,
|
||||
net = dev_net(indev);
|
||||
} else {
|
||||
if (unlikely(netpoll_tx_running(to->br->dev))) {
|
||||
if (!is_skb_forwardable(skb->dev, skb)) {
|
||||
skb_push(skb, ETH_HLEN);
|
||||
if (!is_skb_forwardable(skb->dev, skb))
|
||||
kfree_skb(skb);
|
||||
} else {
|
||||
skb_push(skb, ETH_HLEN);
|
||||
else
|
||||
br_netpoll_send_skb(to, skb);
|
||||
}
|
||||
return;
|
||||
}
|
||||
br_hook = NF_BR_LOCAL_OUT;
|
||||
|
||||
@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
|
||||
IPSTATS_MIB_INDISCARDS);
|
||||
goto drop;
|
||||
}
|
||||
hdr = ipv6_hdr(skb);
|
||||
}
|
||||
if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
|
||||
goto drop;
|
||||
|
||||
@@ -230,6 +230,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
|
||||
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
|
||||
return false;
|
||||
|
||||
ip6h = ipv6_hdr(skb);
|
||||
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
|
||||
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
|
||||
return false;
|
||||
|
||||
@@ -67,6 +67,9 @@
*/
#define MAX_NFRAMES 256

/* limit timers to 400 days for sending/timeouts */
#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)

/* use of last_frames[index].flags */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}

/* check limitations for timeval provided by user */
static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
{
if ((msg_head->ival1.tv_sec < 0) ||
(msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
(msg_head->ival1.tv_usec < 0) ||
(msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
(msg_head->ival2.tv_sec < 0) ||
(msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
(msg_head->ival2.tv_usec < 0) ||
(msg_head->ival2.tv_usec >= USEC_PER_SEC))
return true;

return false;
}
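Editor's note: the new bound and check above cap user-supplied intervals before they ever reach ktime_set(). For reference, the arithmetic and the rejection it produces, as a standalone sketch (the sample tv_sec value is invented):

        #include <stdio.h>

        #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)

        int main(void)
        {
                long long sec_max = BCM_TIMER_SEC_MAX;   /* 400 days = 34,560,000 seconds */
                long long user_tv_sec = 1LL << 31;       /* ~68 years, far past the limit */

                printf("BCM_TIMER_SEC_MAX = %lld seconds\n", sec_max);
                /* bcm_is_invalid_tv() would make the setup ioctl fail with -EINVAL here
                 * instead of feeding the oversized value into the ktime conversion. */
                printf("tv_sec %lld is %s\n", user_tv_sec,
                       user_tv_sec > sec_max ? "rejected" : "accepted");
                return 0;
        }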
|
||||
#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
|
||||
#define OPSIZ sizeof(struct bcm_op)
|
||||
#define MHSIZ sizeof(struct bcm_msg_head)
|
||||
@@ -886,6 +905,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
|
||||
if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
|
||||
return -EINVAL;
|
||||
|
||||
/* check timeval limitations */
|
||||
if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
|
||||
return -EINVAL;
|
||||
|
||||
/* check the given can_id */
|
||||
op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
|
||||
if (op) {
|
||||
@@ -1065,6 +1088,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
|
||||
(!(msg_head->can_id & CAN_RTR_FLAG))))
|
||||
return -EINVAL;
|
||||
|
||||
/* check timeval limitations */
|
||||
if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
|
||||
return -EINVAL;
|
||||
|
||||
/* check the given can_id */
|
||||
op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
|
||||
if (op) {
|
||||
|
||||
@@ -193,7 +193,7 @@ static void fib_flush(struct net *net)
|
||||
struct fib_table *tb;
|
||||
|
||||
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
|
||||
flushed += fib_table_flush(net, tb);
|
||||
flushed += fib_table_flush(net, tb, false);
|
||||
}
|
||||
|
||||
if (flushed)
|
||||
@@ -1299,7 +1299,7 @@ static void ip_fib_net_exit(struct net *net)
|
||||
|
||||
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
|
||||
hlist_del(&tb->tb_hlist);
|
||||
fib_table_flush(net, tb);
|
||||
fib_table_flush(net, tb, true);
|
||||
fib_free_table(tb);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1836,7 +1836,7 @@ void fib_table_flush_external(struct fib_table *tb)
|
||||
}
|
||||
|
||||
/* Caller must hold RTNL. */
|
||||
int fib_table_flush(struct net *net, struct fib_table *tb)
|
||||
int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
|
||||
{
|
||||
struct trie *t = (struct trie *)tb->tb_data;
|
||||
struct key_vector *pn = t->kv;
|
||||
@@ -1884,8 +1884,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
|
||||
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
|
||||
struct fib_info *fi = fa->fa_info;
|
||||
|
||||
if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
|
||||
tb->tb_id != fa->tb_id) {
|
||||
if (!fi || tb->tb_id != fa->tb_id ||
|
||||
(!(fi->fib_flags & RTNH_F_DEAD) &&
|
||||
!fib_props[fa->fa_type].error)) {
|
||||
slen = fa->fa_slen;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Do not flush error routes if network namespace is
|
||||
* not being dismantled
|
||||
*/
|
||||
if (!flush_all && fib_props[fa->fa_type].error) {
|
||||
slen = fa->fa_slen;
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
|
||||
|
||||
void inet_frags_exit_net(struct netns_frags *nf)
|
||||
{
|
||||
nf->low_thresh = 0; /* prevent creation of new frags */
|
||||
nf->high_thresh = 0; /* prevent creation of new frags */
|
||||
|
||||
rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
|
||||
}
|
||||
|
||||
@@ -481,6 +481,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
|
||||
goto drop;
|
||||
}
|
||||
|
||||
iph = ip_hdr(skb);
|
||||
skb->transport_header = skb->network_header + iph->ihl*4;
|
||||
|
||||
/* Remove any debris in the socket control block */
|
||||
|
||||
@@ -1178,7 +1178,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
|
||||
flags = msg->msg_flags;
|
||||
|
||||
if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
|
||||
if (sk->sk_state != TCP_ESTABLISHED) {
|
||||
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
|
||||
err = -EINVAL;
|
||||
goto out_err;
|
||||
}
|
||||
|
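Editor's note: the MSG_ZEROCOPY hunk above swaps an exact-state test for a state bitmask so that CLOSE_WAIT is admitted alongside ESTABLISHED. A small illustrative sketch of how that bitmask test behaves (the numeric state values follow include/net/tcp_states.h):

        #include <stdio.h>

        enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_CLOSE_WAIT = 8 };
        #define TCPF_ESTABLISHED (1 << TCP_ESTABLISHED)
        #define TCPF_CLOSE_WAIT  (1 << TCP_CLOSE_WAIT)

        static int zerocopy_rejected(int sk_state)
        {
                return ((1 << sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) != 0;
        }

        int main(void)
        {
                printf("ESTABLISHED rejected? %d\n", zerocopy_rejected(TCP_ESTABLISHED)); /* 0 */
                printf("CLOSE_WAIT  rejected? %d\n", zerocopy_rejected(TCP_CLOSE_WAIT));  /* 0 */
                printf("SYN_SENT    rejected? %d\n", zerocopy_rejected(TCP_SYN_SENT));    /* 1 */
                return 0;
        }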
||||
@@ -459,7 +459,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
|
||||
if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
|
||||
attrs |= 1 << type;
|
||||
a[type] = nla;
|
||||
}
|
||||
|
||||
@@ -318,7 +318,6 @@ EXPORT_SYMBOL(tcf_block_put);
|
||||
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
||||
struct tcf_result *res, bool compat_mode)
|
||||
{
|
||||
__be16 protocol = tc_skb_protocol(skb);
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
const int max_reclassify_loop = 4;
|
||||
const struct tcf_proto *orig_tp = tp;
|
||||
@@ -328,6 +327,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
|
||||
reclassify:
|
||||
#endif
|
||||
for (; tp; tp = rcu_dereference_bh(tp->next)) {
|
||||
__be16 protocol = tc_skb_protocol(skb);
|
||||
int err;
|
||||
|
||||
if (tp->protocol != protocol &&
|
||||
@@ -359,7 +359,6 @@ reset:
|
||||
}
|
||||
|
||||
tp = first_tp;
|
||||
protocol = tc_skb_protocol(skb);
|
||||
goto reclassify;
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -969,6 +969,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
|
||||
SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
|
||||
SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
|
||||
SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
|
||||
SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
|
||||
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
|
||||
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
|
||||
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
|
||||
|
||||
@@ -265,6 +265,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
|
||||
|
||||
rt5514_dsp = devm_kzalloc(platform->dev, sizeof(*rt5514_dsp),
|
||||
GFP_KERNEL);
|
||||
if (!rt5514_dsp)
|
||||
return -ENOMEM;
|
||||
|
||||
rt5514_dsp->dev = &rt5514_spi->dev;
|
||||
mutex_init(&rt5514_dsp->dma_lock);
|
||||
|
||||
@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
|
||||
struct snd_pcm_hw_params *params,
|
||||
struct snd_soc_dai *dai)
|
||||
{
|
||||
snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
|
||||
int ret;
|
||||
|
||||
ret =
|
||||
snd_pcm_lib_malloc_pages(substream,
|
||||
params_buffer_bytes(params));
|
||||
if (ret)
|
||||
return ret;
|
||||
memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -44,13 +44,13 @@ static int __report_module(struct addr_location *al, u64 ip,
|
||||
Dwarf_Addr s;
|
||||
|
||||
dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
|
||||
if (s != al->map->start)
|
||||
if (s != al->map->start - al->map->pgoff)
|
||||
mod = 0;
|
||||
}
|
||||
|
||||
if (!mod)
|
||||
mod = dwfl_report_elf(ui->dwfl, dso->short_name,
|
||||
dso->long_name, -1, al->map->start,
|
||||
(dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff,
|
||||
false);
|
||||
|
||||
return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
|
||||
|
||||
@@ -145,15 +145,6 @@ struct seccomp_data {
|
||||
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
|
||||
#endif
|
||||
|
||||
#ifndef PTRACE_SECCOMP_GET_METADATA
|
||||
#define PTRACE_SECCOMP_GET_METADATA 0x420d
|
||||
|
||||
struct seccomp_metadata {
|
||||
__u64 filter_off; /* Input: which filter */
|
||||
__u64 flags; /* Output: filter's flags */
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifndef seccomp
|
||||
int seccomp(unsigned int op, unsigned int flags, void *args)
|
||||
{
|
||||
@@ -2870,58 +2861,6 @@ TEST(get_action_avail)
|
||||
EXPECT_EQ(errno, EOPNOTSUPP);
|
||||
}
|
||||
|
||||
TEST(get_metadata)
|
||||
{
|
||||
pid_t pid;
|
||||
int pipefd[2];
|
||||
char buf;
|
||||
struct seccomp_metadata md;
|
||||
|
||||
ASSERT_EQ(0, pipe(pipefd));
|
||||
|
||||
pid = fork();
|
||||
ASSERT_GE(pid, 0);
|
||||
if (pid == 0) {
|
||||
struct sock_filter filter[] = {
|
||||
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
|
||||
};
|
||||
struct sock_fprog prog = {
|
||||
.len = (unsigned short)ARRAY_SIZE(filter),
|
||||
.filter = filter,
|
||||
};
|
||||
|
||||
/* one with log, one without */
|
||||
ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
|
||||
SECCOMP_FILTER_FLAG_LOG, &prog));
|
||||
ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
|
||||
|
||||
ASSERT_EQ(0, close(pipefd[0]));
|
||||
ASSERT_EQ(1, write(pipefd[1], "1", 1));
|
||||
ASSERT_EQ(0, close(pipefd[1]));
|
||||
|
||||
while (1)
|
||||
sleep(100);
|
||||
}
|
||||
|
||||
ASSERT_EQ(0, close(pipefd[1]));
|
||||
ASSERT_EQ(1, read(pipefd[0], &buf, 1));
|
||||
|
||||
ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
|
||||
ASSERT_EQ(pid, waitpid(pid, NULL, 0));
|
||||
|
||||
md.filter_off = 0;
|
||||
ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
|
||||
EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
|
||||
EXPECT_EQ(md.filter_off, 0);
|
||||
|
||||
md.filter_off = 1;
|
||||
ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
|
||||
EXPECT_EQ(md.flags, 0);
|
||||
EXPECT_EQ(md.filter_off, 1);
|
||||
|
||||
ASSERT_EQ(0, kill(pid, SIGKILL));
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO:
|
||||
* - add microbenchmarks
|
||||
|
||||
@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
|
||||
pkey_assert(err);
|
||||
}
|
||||
|
||||
void become_child(void)
|
||||
{
|
||||
pid_t forkret;
|
||||
|
||||
forkret = fork();
|
||||
pkey_assert(forkret >= 0);
|
||||
dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
|
||||
|
||||
if (!forkret) {
|
||||
/* in the child */
|
||||
return;
|
||||
}
|
||||
exit(0);
|
||||
}
|
||||
|
||||
/* Assumes that all pkeys other than 'pkey' are unallocated */
|
||||
void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
|
||||
{
|
||||
@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
|
||||
int nr_allocated_pkeys = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NR_PKEYS*2; i++) {
|
||||
for (i = 0; i < NR_PKEYS*3; i++) {
|
||||
int new_pkey;
|
||||
dprintf1("%s() alloc loop: %d\n", __func__, i);
|
||||
new_pkey = alloc_pkey();
|
||||
@@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
|
||||
if ((new_pkey == -1) && (errno == ENOSPC)) {
|
||||
dprintf2("%s() failed to allocate pkey after %d tries\n",
|
||||
__func__, nr_allocated_pkeys);
|
||||
break;
|
||||
} else {
|
||||
/*
|
||||
* Ensure the number of successes never
|
||||
* exceeds the number of keys supported
|
||||
* in the hardware.
|
||||
*/
|
||||
pkey_assert(nr_allocated_pkeys < NR_PKEYS);
|
||||
allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
|
||||
}
|
||||
pkey_assert(nr_allocated_pkeys < NR_PKEYS);
|
||||
allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
|
||||
|
||||
/*
|
||||
* Make sure that allocation state is properly
|
||||
* preserved across fork().
|
||||
*/
|
||||
if (i == NR_PKEYS*2)
|
||||
become_child();
|
||||
}
|
||||
|
||||
dprintf3("%s()::%d\n", __func__, __LINE__);
|
||||
|
||||
/*
|
||||
* ensure it did not reach the end of the loop without
|
||||
* failure:
|
||||
*/
|
||||
pkey_assert(i < NR_PKEYS*2);
|
||||
|
||||
/*
|
||||
* There are 16 pkeys supported in hardware. Three are
|
||||
* allocated by the time we get here:
|
||||
|
||||