Merge 4.9.135 into android-4.9
Changes in 4.9.135
    media: af9035: prevent buffer overflow on write
    batman-adv: Fix segfault when writing to throughput_override
    batman-adv: Fix segfault when writing to sysfs elp_interval
    batman-adv: Prevent duplicated nc_node entry
    batman-adv: Prevent duplicated softif_vlan entry
    batman-adv: Prevent duplicated global TT entry
    batman-adv: Prevent duplicated tvlv handler
    batman-adv: fix backbone_gw refcount on queue_work() failure
    batman-adv: fix hardif_neigh refcount on queue_work() failure
    clocksource/drivers/ti-32k: Add CLOCK_SOURCE_SUSPEND_NONSTOP flag for non-am43 SoCs
    scsi: ibmvscsis: Fix a stringop-overflow warning
    scsi: ibmvscsis: Ensure partition name is properly NUL terminated
    Input: atakbd - fix Atari keymap
    Input: atakbd - fix Atari CapsLock behaviour
    ravb: do not write 1 to reserved bits
    drm: mali-dp: Call drm_crtc_vblank_reset on device init
    scsi: sd: don't crash the host on invalid commands
    net/mlx4: Use cpumask_available for eq->affinity_mask
    powerpc/tm: Fix userspace r13 corruption
    powerpc/tm: Avoid possible userspace r1 corruption on reclaim
    iommu/amd: Return devid as alias for ACPI HID devices
    mremap: properly flush TLB before releasing the page
    mm: Preserve _PAGE_DEVMAP across mprotect() calls
    netfilter: check for seqadj ext existence before adding it in nf_nat_setup_info
    ARC: build: Get rid of toolchain check
    ARC: build: Don't set CROSS_COMPILE in arch's Makefile
    HID: quirks: fix support for Apple Magic Keyboards
    usb: gadget: serial: fix oops when data rx'd after close
    sched/cputime: Convert kcpustat to nsecs
    macintosh/rack-meter: Convert cputime64_t use to u64
    sched/cputime: Increment kcpustat directly on irqtime account
    sched/cputime: Fix ksoftirqd cputime accounting regression
    ext4: avoid running out of journal credits when appending to an inline file
    HV: properly delay KVP packets when negotiation is in progress
    Linux 4.9.135

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 134
+SUBLEVEL = 135
 EXTRAVERSION =
 NAME = Roaring Lionus

@@ -8,34 +8,12 @@
 UTS_MACHINE := arc

-ifeq ($(CROSS_COMPILE),)
-ifndef CONFIG_CPU_BIG_ENDIAN
-CROSS_COMPILE := arc-linux-
-else
-CROSS_COMPILE := arceb-linux-
-endif
-endif
-
 KBUILD_DEFCONFIG := nsim_700_defconfig

 cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
 cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs

-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
-$(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
-$(error Toolchain not configured for ARCv2 builds)
-endif
-endif
-
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
 # We had a customer reported bug where some code built in kernel was NOT using
@@ -89,7 +67,7 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
 # --build-id w/o "-marclinux". Default arc-elf32-ld is OK
 ldflags-$(upto_gcc44) += -marclinux

-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)

 # Modules with short calls might break for calls into builtin-kernel
 KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
@@ -166,13 +166,27 @@ _GLOBAL(tm_reclaim)
|
||||
std r1, PACATMSCRATCH(r13)
|
||||
ld r1, PACAR1(r13)
|
||||
|
||||
/* Store the PPR in r11 and reset to decent value */
|
||||
std r11, GPR11(r1) /* Temporary stash */
|
||||
|
||||
/*
|
||||
* Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
|
||||
* clobbered by an exception once we turn on MSR_RI below.
|
||||
*/
|
||||
ld r11, PACATMSCRATCH(r13)
|
||||
std r11, GPR1(r1)
|
||||
|
||||
/*
|
||||
* Store r13 away so we can free up the scratch SPR for the SLB fault
|
||||
* handler (needed once we start accessing the thread_struct).
|
||||
*/
|
||||
GET_SCRATCH0(r11)
|
||||
std r11, GPR13(r1)
|
||||
|
||||
/* Reset MSR RI so we can take SLB faults again */
|
||||
li r11, MSR_RI
|
||||
mtmsrd r11, 1
|
||||
|
||||
/* Store the PPR in r11 and reset to decent value */
|
||||
mfspr r11, SPRN_PPR
|
||||
HMT_MEDIUM
|
||||
|
||||
@@ -197,11 +211,11 @@ _GLOBAL(tm_reclaim)
|
||||
SAVE_GPR(8, r7) /* user r8 */
|
||||
SAVE_GPR(9, r7) /* user r9 */
|
||||
SAVE_GPR(10, r7) /* user r10 */
|
||||
ld r3, PACATMSCRATCH(r13) /* user r1 */
|
||||
ld r3, GPR1(r1) /* user r1 */
|
||||
ld r4, GPR7(r1) /* user r7 */
|
||||
ld r5, GPR11(r1) /* user r11 */
|
||||
ld r6, GPR12(r1) /* user r12 */
|
||||
GET_SCRATCH0(8) /* user r13 */
|
||||
ld r8, GPR13(r1) /* user r13 */
|
||||
std r3, GPR1(r7)
|
||||
std r4, GPR7(r7)
|
||||
std r5, GPR11(r7)
|
||||
|
||||
@@ -113,21 +113,21 @@ static void appldata_get_os_data(void *data)
|
||||
j = 0;
|
||||
for_each_online_cpu(i) {
|
||||
os_data->os_cpu[j].per_cpu_user =
|
||||
cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
|
||||
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
|
||||
os_data->os_cpu[j].per_cpu_nice =
|
||||
cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
|
||||
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
|
||||
os_data->os_cpu[j].per_cpu_system =
|
||||
cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
|
||||
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
|
||||
os_data->os_cpu[j].per_cpu_idle =
|
||||
cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
|
||||
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
|
||||
os_data->os_cpu[j].per_cpu_irq =
|
||||
cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
|
||||
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
|
||||
os_data->os_cpu[j].per_cpu_softirq =
|
||||
cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
|
||||
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
|
||||
os_data->os_cpu[j].per_cpu_iowait =
|
||||
cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
|
||||
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
|
||||
os_data->os_cpu[j].per_cpu_steal =
|
||||
cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
|
||||
nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
|
||||
os_data->os_cpu[j].cpu_id = i;
|
||||
j++;
|
||||
}
|
||||
|
||||
@@ -134,7 +134,7 @@
|
||||
*/
|
||||
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
|
||||
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
|
||||
_PAGE_SOFT_DIRTY)
|
||||
_PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
|
||||
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
|
||||
|
||||
/* The ASID is the lower 12 bits of CR3 */
|
||||
|
||||
@@ -98,6 +98,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
if (!of_machine_is_compatible("ti,am43"))
|
||||
ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
|
||||
|
||||
ti_32k_timer.counter = ti_32k_timer.base;
|
||||
|
||||
/*
|
||||
|
||||
@@ -142,7 +142,7 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
|
||||
u64 cur_wall_time;
|
||||
u64 busy_time;
|
||||
|
||||
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
|
||||
cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
|
||||
|
||||
busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
|
||||
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
|
||||
@@ -153,9 +153,9 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
|
||||
|
||||
idle_time = cur_wall_time - busy_time;
|
||||
if (wall)
|
||||
*wall = cputime_to_usecs(cur_wall_time);
|
||||
*wall = div_u64(cur_wall_time, NSEC_PER_USEC);
|
||||
|
||||
return cputime_to_usecs(idle_time);
|
||||
return div_u64(idle_time, NSEC_PER_USEC);
|
||||
}
|
||||
|
||||
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
|
||||
|
||||
@@ -152,7 +152,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
|
||||
if (ignore_nice) {
|
||||
u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
|
||||
|
||||
idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
|
||||
idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
|
||||
j_cdbs->prev_cpu_nice = cur_nice;
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/cputime.h>
|
||||
|
||||
static DEFINE_SPINLOCK(cpufreq_stats_lock);
|
||||
|
||||
|
||||
@@ -378,6 +378,7 @@ static int malidp_bind(struct device *dev)
|
||||
goto irq_init_fail;
|
||||
|
||||
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
|
||||
drm_crtc_vblank_reset(&malidp->crtc);
|
||||
if (ret < 0) {
|
||||
DRM_ERROR("failed to initialise vblank\n");
|
||||
goto vblank_fail;
|
||||
|
||||
@@ -1853,6 +1853,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
|
||||
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
|
||||
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
|
||||
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
|
||||
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
|
||||
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
|
||||
|
||||
@@ -616,21 +616,22 @@ void hv_kvp_onchannelcallback(void *context)
|
||||
NEGO_IN_PROGRESS,
|
||||
NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
|
||||
|
||||
if (host_negotiatied == NEGO_NOT_STARTED &&
|
||||
kvp_transaction.state < HVUTIL_READY) {
|
||||
if (kvp_transaction.state < HVUTIL_READY) {
|
||||
/*
|
||||
* If userspace daemon is not connected and host is asking
|
||||
* us to negotiate we need to delay to not lose messages.
|
||||
* This is important for Failover IP setting.
|
||||
*/
|
||||
host_negotiatied = NEGO_IN_PROGRESS;
|
||||
schedule_delayed_work(&kvp_host_handshake_work,
|
||||
if (host_negotiatied == NEGO_NOT_STARTED) {
|
||||
host_negotiatied = NEGO_IN_PROGRESS;
|
||||
schedule_delayed_work(&kvp_host_handshake_work,
|
||||
HV_UTIL_NEGO_TIMEOUT * HZ);
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (kvp_transaction.state > HVUTIL_READY)
|
||||
return;
|
||||
|
||||
recheck:
|
||||
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
|
||||
&requestid);
|
||||
|
||||
@@ -707,6 +708,8 @@ void hv_kvp_onchannelcallback(void *context)
|
||||
VM_PKT_DATA_INBAND, 0);
|
||||
|
||||
host_negotiatied = NEGO_FINISHED;
|
||||
|
||||
goto recheck;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -79,8 +79,7 @@ MODULE_LICENSE("GPL");
|
||||
*/
|
||||
|
||||
|
||||
static unsigned char atakbd_keycode[0x72] = { /* American layout */
|
||||
[0] = KEY_GRAVE,
|
||||
static unsigned char atakbd_keycode[0x73] = { /* American layout */
|
||||
[1] = KEY_ESC,
|
||||
[2] = KEY_1,
|
||||
[3] = KEY_2,
|
||||
@@ -121,9 +120,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
|
||||
[38] = KEY_L,
|
||||
[39] = KEY_SEMICOLON,
|
||||
[40] = KEY_APOSTROPHE,
|
||||
[41] = KEY_BACKSLASH, /* FIXME, '#' */
|
||||
[41] = KEY_GRAVE,
|
||||
[42] = KEY_LEFTSHIFT,
|
||||
[43] = KEY_GRAVE, /* FIXME: '~' */
|
||||
[43] = KEY_BACKSLASH,
|
||||
[44] = KEY_Z,
|
||||
[45] = KEY_X,
|
||||
[46] = KEY_C,
|
||||
@@ -149,45 +148,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
|
||||
[66] = KEY_F8,
|
||||
[67] = KEY_F9,
|
||||
[68] = KEY_F10,
|
||||
[69] = KEY_ESC,
|
||||
[70] = KEY_DELETE,
|
||||
[71] = KEY_KP7,
|
||||
[72] = KEY_KP8,
|
||||
[73] = KEY_KP9,
|
||||
[71] = KEY_HOME,
|
||||
[72] = KEY_UP,
|
||||
[74] = KEY_KPMINUS,
|
||||
[75] = KEY_KP4,
|
||||
[76] = KEY_KP5,
|
||||
[77] = KEY_KP6,
|
||||
[75] = KEY_LEFT,
|
||||
[77] = KEY_RIGHT,
|
||||
[78] = KEY_KPPLUS,
|
||||
[79] = KEY_KP1,
|
||||
[80] = KEY_KP2,
|
||||
[81] = KEY_KP3,
|
||||
[82] = KEY_KP0,
|
||||
[83] = KEY_KPDOT,
|
||||
[90] = KEY_KPLEFTPAREN,
|
||||
[91] = KEY_KPRIGHTPAREN,
|
||||
[92] = KEY_KPASTERISK, /* FIXME */
|
||||
[93] = KEY_KPASTERISK,
|
||||
[94] = KEY_KPPLUS,
|
||||
[95] = KEY_HELP,
|
||||
[80] = KEY_DOWN,
|
||||
[82] = KEY_INSERT,
|
||||
[83] = KEY_DELETE,
|
||||
[96] = KEY_102ND,
|
||||
[97] = KEY_KPASTERISK, /* FIXME */
|
||||
[98] = KEY_KPSLASH,
|
||||
[97] = KEY_UNDO,
|
||||
[98] = KEY_HELP,
|
||||
[99] = KEY_KPLEFTPAREN,
|
||||
[100] = KEY_KPRIGHTPAREN,
|
||||
[101] = KEY_KPSLASH,
|
||||
[102] = KEY_KPASTERISK,
|
||||
[103] = KEY_UP,
|
||||
[104] = KEY_KPASTERISK, /* FIXME */
|
||||
[105] = KEY_LEFT,
|
||||
[106] = KEY_RIGHT,
|
||||
[107] = KEY_KPASTERISK, /* FIXME */
|
||||
[108] = KEY_DOWN,
|
||||
[109] = KEY_KPASTERISK, /* FIXME */
|
||||
[110] = KEY_KPASTERISK, /* FIXME */
|
||||
[111] = KEY_KPASTERISK, /* FIXME */
|
||||
[112] = KEY_KPASTERISK, /* FIXME */
|
||||
[113] = KEY_KPASTERISK /* FIXME */
|
||||
[103] = KEY_KP7,
|
||||
[104] = KEY_KP8,
|
||||
[105] = KEY_KP9,
|
||||
[106] = KEY_KP4,
|
||||
[107] = KEY_KP5,
|
||||
[108] = KEY_KP6,
|
||||
[109] = KEY_KP1,
|
||||
[110] = KEY_KP2,
|
||||
[111] = KEY_KP3,
|
||||
[112] = KEY_KP0,
|
||||
[113] = KEY_KPDOT,
|
||||
[114] = KEY_KPENTER,
|
||||
};
|
||||
|
||||
static struct input_dev *atakbd_dev;
|
||||
@@ -195,21 +183,15 @@ static struct input_dev *atakbd_dev;
|
||||
static void atakbd_interrupt(unsigned char scancode, char down)
|
||||
{
|
||||
|
||||
if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
|
||||
if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
|
||||
|
||||
// report raw events here?
|
||||
|
||||
scancode = atakbd_keycode[scancode];
|
||||
|
||||
if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
|
||||
input_report_key(atakbd_dev, scancode, 1);
|
||||
input_report_key(atakbd_dev, scancode, 0);
|
||||
input_sync(atakbd_dev);
|
||||
} else {
|
||||
input_report_key(atakbd_dev, scancode, down);
|
||||
input_sync(atakbd_dev);
|
||||
}
|
||||
} else /* scancodes >= 0xf2 are mouse data, most likely */
|
||||
input_report_key(atakbd_dev, scancode, down);
|
||||
input_sync(atakbd_dev);
|
||||
} else /* scancodes >= 0xf3 are mouse data, most likely */
|
||||
printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
|
||||
|
||||
return;
|
||||
|
||||
@@ -288,7 +288,13 @@ static u16 get_alias(struct device *dev)
|
||||
|
||||
/* The callers make sure that get_device_id() does not fail here */
|
||||
devid = get_device_id(dev);
|
||||
|
||||
/* For ACPI HID devices, we simply return the devid as such */
|
||||
if (!dev_is_pci(dev))
|
||||
return devid;
|
||||
|
||||
ivrs_alias = amd_iommu_alias_table[devid];
|
||||
|
||||
pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
|
||||
|
||||
if (ivrs_alias == pci_alias)
|
||||
|
||||
@@ -52,8 +52,8 @@ struct rackmeter_dma {
|
||||
struct rackmeter_cpu {
|
||||
struct delayed_work sniffer;
|
||||
struct rackmeter *rm;
|
||||
cputime64_t prev_wall;
|
||||
cputime64_t prev_idle;
|
||||
u64 prev_wall;
|
||||
u64 prev_idle;
|
||||
int zero;
|
||||
} ____cacheline_aligned;
|
||||
|
||||
@@ -81,7 +81,7 @@ static int rackmeter_ignore_nice;
|
||||
/* This is copied from cpufreq_ondemand, maybe we should put it in
|
||||
* a common header somewhere
|
||||
*/
|
||||
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
|
||||
static inline u64 get_cpu_idle_time(unsigned int cpu)
|
||||
{
|
||||
u64 retval;
|
||||
|
||||
@@ -217,23 +217,23 @@ static void rackmeter_do_timer(struct work_struct *work)
|
||||
container_of(work, struct rackmeter_cpu, sniffer.work);
|
||||
struct rackmeter *rm = rcpu->rm;
|
||||
unsigned int cpu = smp_processor_id();
|
||||
cputime64_t cur_jiffies, total_idle_ticks;
|
||||
unsigned int total_ticks, idle_ticks;
|
||||
u64 cur_nsecs, total_idle_nsecs;
|
||||
u64 total_nsecs, idle_nsecs;
|
||||
int i, offset, load, cumm, pause;
|
||||
|
||||
cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
|
||||
total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
|
||||
rcpu->prev_wall = cur_jiffies;
|
||||
cur_nsecs = jiffies64_to_nsecs(get_jiffies_64());
|
||||
total_nsecs = cur_nsecs - rcpu->prev_wall;
|
||||
rcpu->prev_wall = cur_nsecs;
|
||||
|
||||
total_idle_ticks = get_cpu_idle_time(cpu);
|
||||
idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
|
||||
idle_ticks = min(idle_ticks, total_ticks);
|
||||
rcpu->prev_idle = total_idle_ticks;
|
||||
total_idle_nsecs = get_cpu_idle_time(cpu);
|
||||
idle_nsecs = total_idle_nsecs - rcpu->prev_idle;
|
||||
idle_nsecs = min(idle_nsecs, total_nsecs);
|
||||
rcpu->prev_idle = total_idle_nsecs;
|
||||
|
||||
/* We do a very dumb calculation to update the LEDs for now,
|
||||
* we'll do better once we have actual PWM implemented
|
||||
*/
|
||||
load = (9 * (total_ticks - idle_ticks)) / total_ticks;
|
||||
load = div64_u64(9 * (total_nsecs - idle_nsecs), total_nsecs);
|
||||
|
||||
offset = cpu << 3;
|
||||
cumm = 0;
|
||||
@@ -278,7 +278,7 @@ static void rackmeter_init_cpu_sniffer(struct rackmeter *rm)
|
||||
continue;
|
||||
rcpu = &rm->cpu[cpu];
|
||||
rcpu->prev_idle = get_cpu_idle_time(cpu);
|
||||
rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
|
||||
rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64());
|
||||
schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
|
||||
msecs_to_jiffies(CPU_SAMPLING_RATE));
|
||||
}
|
||||
|
||||
@@ -406,8 +406,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
|
||||
msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
|
||||
reg |= 0x100000;
|
||||
|
||||
ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
|
||||
msg[0].len - 3);
|
||||
ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
|
||||
&msg[0].buf[3],
|
||||
msg[0].len - 3)
|
||||
: -EOPNOTSUPP;
|
||||
} else {
|
||||
/* I2C write */
|
||||
u8 buf[MAX_XFER_SIZE];
|
||||
|
||||
@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
|
||||
struct mlx4_dev *dev = &priv->dev;
|
||||
struct mlx4_eq *eq = &priv->eq_table.eq[vec];
|
||||
|
||||
if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
|
||||
if (!cpumask_available(eq->affinity_mask) ||
|
||||
cpumask_empty(eq->affinity_mask))
|
||||
return;
|
||||
|
||||
hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
|
||||
|
||||
@@ -421,6 +421,7 @@ enum EIS_BIT {
|
||||
EIS_CULF1 = 0x00000080,
|
||||
EIS_TFFF = 0x00000100,
|
||||
EIS_QFS = 0x00010000,
|
||||
EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
|
||||
};
|
||||
|
||||
/* RIC0 */
|
||||
@@ -465,6 +466,7 @@ enum RIS0_BIT {
|
||||
RIS0_FRF15 = 0x00008000,
|
||||
RIS0_FRF16 = 0x00010000,
|
||||
RIS0_FRF17 = 0x00020000,
|
||||
RIS0_RESERVED = GENMASK(31, 18),
|
||||
};
|
||||
|
||||
/* RIC1 */
|
||||
@@ -521,6 +523,7 @@ enum RIS2_BIT {
|
||||
RIS2_QFF16 = 0x00010000,
|
||||
RIS2_QFF17 = 0x00020000,
|
||||
RIS2_RFFF = 0x80000000,
|
||||
RIS2_RESERVED = GENMASK(30, 18),
|
||||
};
|
||||
|
||||
/* TIC */
|
||||
@@ -537,6 +540,7 @@ enum TIS_BIT {
|
||||
TIS_FTF1 = 0x00000002, /* Undocumented? */
|
||||
TIS_TFUF = 0x00000100,
|
||||
TIS_TFWF = 0x00000200,
|
||||
TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
|
||||
};
|
||||
|
||||
/* ISS */
|
||||
@@ -610,6 +614,7 @@ enum GIC_BIT {
|
||||
enum GIS_BIT {
|
||||
GIS_PTCF = 0x00000001, /* Undocumented? */
|
||||
GIS_PTMF = 0x00000004,
|
||||
GIS_RESERVED = GENMASK(15, 10),
|
||||
};
|
||||
|
||||
/* GIE (R-Car Gen3 only) */
|
||||
|
||||
@@ -717,10 +717,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
|
||||
u32 eis, ris2;
|
||||
|
||||
eis = ravb_read(ndev, EIS);
|
||||
ravb_write(ndev, ~EIS_QFS, EIS);
|
||||
ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
|
||||
if (eis & EIS_QFS) {
|
||||
ris2 = ravb_read(ndev, RIS2);
|
||||
ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
|
||||
ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
|
||||
RIS2);
|
||||
|
||||
/* Receive Descriptor Empty int */
|
||||
if (ris2 & RIS2_QFF0)
|
||||
@@ -773,7 +774,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
|
||||
u32 tis = ravb_read(ndev, TIS);
|
||||
|
||||
if (tis & TIS_TFUF) {
|
||||
ravb_write(ndev, ~TIS_TFUF, TIS);
|
||||
ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
|
||||
ravb_get_tx_tstamp(ndev);
|
||||
return true;
|
||||
}
|
||||
@@ -908,7 +909,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
|
||||
/* Processing RX Descriptor Ring */
|
||||
if (ris0 & mask) {
|
||||
/* Clear RX interrupt */
|
||||
ravb_write(ndev, ~mask, RIS0);
|
||||
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
|
||||
if (ravb_rx(ndev, "a, q))
|
||||
goto out;
|
||||
}
|
||||
@@ -916,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
|
||||
if (tis & mask) {
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
/* Clear TX interrupt */
|
||||
ravb_write(ndev, ~mask, TIS);
|
||||
ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
|
||||
ravb_tx_free(ndev, q, true);
|
||||
netif_wake_subqueue(ndev, q);
|
||||
mmiowb();
|
||||
|
||||
@@ -319,7 +319,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
|
||||
}
|
||||
}
|
||||
|
||||
ravb_write(ndev, ~gis, GIS);
|
||||
ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
|
||||
}
|
||||
|
||||
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
|
||||
|
||||
@@ -3342,11 +3342,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
|
||||
vscsi->dds.window[LOCAL].liobn,
|
||||
vscsi->dds.window[REMOTE].liobn);
|
||||
|
||||
strcpy(vscsi->eye, "VSCSI ");
|
||||
strncat(vscsi->eye, vdev->name, MAX_EYE);
|
||||
snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
|
||||
|
||||
vscsi->dds.unit_id = vdev->unit_address;
|
||||
strncpy(vscsi->dds.partition_name, partition_name,
|
||||
strscpy(vscsi->dds.partition_name, partition_name,
|
||||
sizeof(vscsi->dds.partition_name));
|
||||
vscsi->dds.partition_num = partition_number;
|
||||
|
||||
|
||||
@@ -1158,7 +1158,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
|
||||
case REQ_OP_WRITE:
|
||||
return sd_setup_read_write_cmnd(cmd);
|
||||
default:
|
||||
BUG();
|
||||
WARN_ON_ONCE(1);
|
||||
return BLKPREP_KILL;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -537,7 +537,7 @@ static void gs_rx_push(unsigned long _port)
|
||||
}
|
||||
|
||||
/* push data to (open) tty */
|
||||
if (req->actual) {
|
||||
if (req->actual && tty) {
|
||||
char *packet = req->buf;
|
||||
unsigned size = req->actual;
|
||||
unsigned n;
|
||||
|
||||
@@ -3002,9 +3002,6 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
|
||||
extern int ext4_inline_data_fiemap(struct inode *inode,
|
||||
struct fiemap_extent_info *fieinfo,
|
||||
int *has_inline, __u64 start, __u64 len);
|
||||
extern int ext4_try_to_evict_inline_data(handle_t *handle,
|
||||
struct inode *inode,
|
||||
int needed);
|
||||
extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
|
||||
|
||||
extern int ext4_convert_inline_data(struct inode *inode);
|
||||
|
||||
@@ -903,11 +903,11 @@ retry_journal:
|
||||
flags |= AOP_FLAG_NOFS;
|
||||
|
||||
if (ret == -ENOSPC) {
|
||||
ext4_journal_stop(handle);
|
||||
ret = ext4_da_convert_inline_data_to_extent(mapping,
|
||||
inode,
|
||||
flags,
|
||||
fsdata);
|
||||
ext4_journal_stop(handle);
|
||||
if (ret == -ENOSPC &&
|
||||
ext4_should_retry_alloc(inode->i_sb, &retries))
|
||||
goto retry_journal;
|
||||
@@ -1879,42 +1879,6 @@ out:
|
||||
return (error < 0 ? error : 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called during xattr set, and if we can sparse space 'needed',
|
||||
* just create the extent tree evict the data to the outer block.
|
||||
*
|
||||
* We use jbd2 instead of page cache to move data to the 1st block
|
||||
* so that the whole transaction can be committed as a whole and
|
||||
* the data isn't lost because of the delayed page cache write.
|
||||
*/
|
||||
int ext4_try_to_evict_inline_data(handle_t *handle,
|
||||
struct inode *inode,
|
||||
int needed)
|
||||
{
|
||||
int error;
|
||||
struct ext4_xattr_entry *entry;
|
||||
struct ext4_inode *raw_inode;
|
||||
struct ext4_iloc iloc;
|
||||
|
||||
error = ext4_get_inode_loc(inode, &iloc);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
raw_inode = ext4_raw_inode(&iloc);
|
||||
entry = (struct ext4_xattr_entry *)((void *)raw_inode +
|
||||
EXT4_I(inode)->i_inline_off);
|
||||
if (EXT4_XATTR_LEN(entry->e_name_len) +
|
||||
EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
|
||||
error = -ENOSPC;
|
||||
goto out;
|
||||
}
|
||||
|
||||
error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
|
||||
out:
|
||||
brelse(iloc.bh);
|
||||
return error;
|
||||
}
|
||||
|
||||
void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
|
||||
{
|
||||
handle_t *handle;
|
||||
|
||||
@@ -1086,22 +1086,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
|
||||
if (EXT4_I(inode)->i_extra_isize == 0)
|
||||
return -ENOSPC;
|
||||
error = ext4_xattr_set_entry(i, s, inode);
|
||||
if (error) {
|
||||
if (error == -ENOSPC &&
|
||||
ext4_has_inline_data(inode)) {
|
||||
error = ext4_try_to_evict_inline_data(handle, inode,
|
||||
EXT4_XATTR_LEN(strlen(i->name) +
|
||||
EXT4_XATTR_SIZE(i->value_len)));
|
||||
if (error)
|
||||
return error;
|
||||
error = ext4_xattr_ibody_find(inode, i, is);
|
||||
if (error)
|
||||
return error;
|
||||
error = ext4_xattr_set_entry(i, s, inode);
|
||||
}
|
||||
if (error)
|
||||
return error;
|
||||
}
|
||||
if (error)
|
||||
return error;
|
||||
header = IHDR(inode, ext4_raw_inode(&is->iloc));
|
||||
if (!IS_LAST_ENTRY(s->first)) {
|
||||
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
|
||||
|
||||
@@ -21,23 +21,23 @@
|
||||
|
||||
#ifdef arch_idle_time
|
||||
|
||||
static cputime64_t get_idle_time(int cpu)
|
||||
static u64 get_idle_time(int cpu)
|
||||
{
|
||||
cputime64_t idle;
|
||||
u64 idle;
|
||||
|
||||
idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
|
||||
if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
|
||||
idle += arch_idle_time(cpu);
|
||||
idle += cputime_to_nsecs(arch_idle_time(cpu));
|
||||
return idle;
|
||||
}
|
||||
|
||||
static cputime64_t get_iowait_time(int cpu)
|
||||
static u64 get_iowait_time(int cpu)
|
||||
{
|
||||
cputime64_t iowait;
|
||||
u64 iowait;
|
||||
|
||||
iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
|
||||
if (cpu_online(cpu) && nr_iowait_cpu(cpu))
|
||||
iowait += arch_idle_time(cpu);
|
||||
iowait += cputime_to_nsecs(arch_idle_time(cpu));
|
||||
return iowait;
|
||||
}
|
||||
|
||||
@@ -45,32 +45,32 @@ static cputime64_t get_iowait_time(int cpu)
|
||||
|
||||
static u64 get_idle_time(int cpu)
|
||||
{
|
||||
u64 idle, idle_time = -1ULL;
|
||||
u64 idle, idle_usecs = -1ULL;
|
||||
|
||||
if (cpu_online(cpu))
|
||||
idle_time = get_cpu_idle_time_us(cpu, NULL);
|
||||
idle_usecs = get_cpu_idle_time_us(cpu, NULL);
|
||||
|
||||
if (idle_time == -1ULL)
|
||||
if (idle_usecs == -1ULL)
|
||||
/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
|
||||
idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
|
||||
else
|
||||
idle = usecs_to_cputime64(idle_time);
|
||||
idle = idle_usecs * NSEC_PER_USEC;
|
||||
|
||||
return idle;
|
||||
}
|
||||
|
||||
static u64 get_iowait_time(int cpu)
|
||||
{
|
||||
u64 iowait, iowait_time = -1ULL;
|
||||
u64 iowait, iowait_usecs = -1ULL;
|
||||
|
||||
if (cpu_online(cpu))
|
||||
iowait_time = get_cpu_iowait_time_us(cpu, NULL);
|
||||
iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);
|
||||
|
||||
if (iowait_time == -1ULL)
|
||||
if (iowait_usecs == -1ULL)
|
||||
/* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
|
||||
iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
|
||||
else
|
||||
iowait = usecs_to_cputime64(iowait_time);
|
||||
iowait = iowait_usecs * NSEC_PER_USEC;
|
||||
|
||||
return iowait;
|
||||
}
|
||||
@@ -115,16 +115,16 @@ static int show_stat(struct seq_file *p, void *v)
|
||||
}
|
||||
sum += arch_irq_stat();
|
||||
|
||||
seq_put_decimal_ull(p, "cpu ", cputime64_to_clock_t(user));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
|
||||
seq_put_decimal_ull(p, "cpu ", nsec_to_clock_t(user));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
|
||||
seq_putc(p, '\n');
|
||||
|
||||
for_each_online_cpu(i) {
|
||||
@@ -140,16 +140,16 @@ static int show_stat(struct seq_file *p, void *v)
|
||||
guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
|
||||
guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
|
||||
seq_printf(p, "cpu%d", i);
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
|
||||
seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
|
||||
seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
|
||||
seq_putc(p, '\n');
|
||||
}
|
||||
seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
|
||||
|
||||
@@ -5,23 +5,20 @@
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/cputime.h>
|
||||
|
||||
static int uptime_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct timespec uptime;
|
||||
struct timespec idle;
|
||||
u64 idletime;
|
||||
u64 nsec;
|
||||
u32 rem;
|
||||
int i;
|
||||
|
||||
idletime = 0;
|
||||
nsec = 0;
|
||||
for_each_possible_cpu(i)
|
||||
idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
|
||||
nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
|
||||
|
||||
get_monotonic_boottime(&uptime);
|
||||
nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
|
||||
idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
|
||||
idle.tv_nsec = rem;
|
||||
seq_printf(m, "%lu.%02lu %lu.%02lu\n",
|
||||
|
||||
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
unsigned char *vec);
|
||||
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
|
||||
unsigned long new_addr, unsigned long old_end,
|
||||
pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
|
||||
pmd_t *old_pmd, pmd_t *new_pmd);
|
||||
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
unsigned long addr, pgprot_t newprot,
|
||||
int prot_numa);
|
||||
|
||||
@@ -297,7 +297,7 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v)
|
||||
for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
|
||||
seq_printf(sf, "%s %lld\n",
|
||||
cpuacct_stat_desc[stat],
|
||||
cputime64_to_clock_t(val[stat]));
|
||||
nsec_to_clock_t(val[stat]));
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -38,6 +38,18 @@ void disable_sched_clock_irqtime(void)
|
||||
sched_clock_irqtime = 0;
|
||||
}
|
||||
|
||||
static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
|
||||
enum cpu_usage_stat idx)
|
||||
{
|
||||
u64 *cpustat = kcpustat_this_cpu->cpustat;
|
||||
|
||||
u64_stats_update_begin(&irqtime->sync);
|
||||
cpustat[idx] += delta;
|
||||
irqtime->total += delta;
|
||||
irqtime->tick_delta += delta;
|
||||
u64_stats_update_end(&irqtime->sync);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called before incrementing preempt_count on {soft,}irq_enter
|
||||
* and before decrementing preempt_count on {soft,}irq_exit.
|
||||
@@ -62,7 +74,6 @@ void irqtime_account_irq(struct task_struct *curr)
|
||||
delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
|
||||
irqtime->irq_start_time += delta;
|
||||
|
||||
u64_stats_update_begin(&irqtime->sync);
|
||||
/*
|
||||
* We do not account for softirq time from ksoftirqd here.
|
||||
* We want to continue accounting softirq time to ksoftirqd thread
|
||||
@@ -70,9 +81,9 @@ void irqtime_account_irq(struct task_struct *curr)
|
||||
* that do not consume any time, but still wants to run.
|
||||
*/
|
||||
if (hardirq_count())
|
||||
irqtime->hardirq_time += delta;
|
||||
irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
|
||||
else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
|
||||
irqtime->softirq_time += delta;
|
||||
irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
|
||||
#ifdef CONFIG_SCHED_WALT
|
||||
else
|
||||
account = false;
|
||||
@@ -86,40 +97,23 @@ void irqtime_account_irq(struct task_struct *curr)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irqtime_account_irq);
|
||||
|
||||
static cputime_t irqtime_account_update(u64 irqtime, int idx, cputime_t maxtime)
|
||||
static cputime_t irqtime_tick_accounted(cputime_t maxtime)
|
||||
{
|
||||
u64 *cpustat = kcpustat_this_cpu->cpustat;
|
||||
cputime_t irq_cputime;
|
||||
struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
|
||||
cputime_t delta;
|
||||
|
||||
irq_cputime = nsecs_to_cputime64(irqtime) - cpustat[idx];
|
||||
irq_cputime = min(irq_cputime, maxtime);
|
||||
cpustat[idx] += irq_cputime;
|
||||
delta = nsecs_to_cputime(irqtime->tick_delta);
|
||||
delta = min(delta, maxtime);
|
||||
irqtime->tick_delta -= cputime_to_nsecs(delta);
|
||||
|
||||
return irq_cputime;
|
||||
}
|
||||
|
||||
static cputime_t irqtime_account_hi_update(cputime_t maxtime)
|
||||
{
|
||||
return irqtime_account_update(__this_cpu_read(cpu_irqtime.hardirq_time),
|
||||
CPUTIME_IRQ, maxtime);
|
||||
}
|
||||
|
||||
static cputime_t irqtime_account_si_update(cputime_t maxtime)
|
||||
{
|
||||
return irqtime_account_update(__this_cpu_read(cpu_irqtime.softirq_time),
|
||||
CPUTIME_SOFTIRQ, maxtime);
|
||||
return delta;
|
||||
}
|
||||
|
||||
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
|
||||
|
||||
#define sched_clock_irqtime (0)
|
||||
|
||||
static cputime_t irqtime_account_hi_update(cputime_t dummy)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static cputime_t irqtime_account_si_update(cputime_t dummy)
|
||||
static cputime_t irqtime_tick_accounted(cputime_t dummy)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@@ -159,7 +153,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
|
||||
index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
|
||||
|
||||
/* Add user time to cpustat. */
|
||||
task_group_account_field(p, index, (__force u64) cputime);
|
||||
task_group_account_field(p, index, cputime_to_nsecs(cputime));
|
||||
|
||||
/* Account for user time used */
|
||||
acct_account_cputime(p);
|
||||
@@ -187,11 +181,11 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
|
||||
|
||||
/* Add guest time to cpustat. */
|
||||
if (task_nice(p) > 0) {
|
||||
cpustat[CPUTIME_NICE] += (__force u64) cputime;
|
||||
cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
|
||||
cpustat[CPUTIME_NICE] += cputime_to_nsecs(cputime);
|
||||
cpustat[CPUTIME_GUEST_NICE] += cputime_to_nsecs(cputime);
|
||||
} else {
|
||||
cpustat[CPUTIME_USER] += (__force u64) cputime;
|
||||
cpustat[CPUTIME_GUEST] += (__force u64) cputime;
|
||||
cpustat[CPUTIME_USER] += cputime_to_nsecs(cputime);
|
||||
cpustat[CPUTIME_GUEST] += cputime_to_nsecs(cputime);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -212,7 +206,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
|
||||
account_group_system_time(p, cputime);
|
||||
|
||||
/* Add system time to cpustat. */
|
||||
task_group_account_field(p, index, (__force u64) cputime);
|
||||
task_group_account_field(p, index, cputime_to_nsecs(cputime));
|
||||
|
||||
/* Account for system time used */
|
||||
acct_account_cputime(p);
|
||||
@@ -256,7 +250,7 @@ void account_steal_time(cputime_t cputime)
|
||||
{
|
||||
u64 *cpustat = kcpustat_this_cpu->cpustat;
|
||||
|
||||
cpustat[CPUTIME_STEAL] += (__force u64) cputime;
|
||||
cpustat[CPUTIME_STEAL] += cputime_to_nsecs(cputime);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -269,9 +263,9 @@ void account_idle_time(cputime_t cputime)
|
||||
struct rq *rq = this_rq();
|
||||
|
||||
if (atomic_read(&rq->nr_iowait) > 0)
|
||||
cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
|
||||
cpustat[CPUTIME_IOWAIT] += cputime_to_nsecs(cputime);
|
||||
else
|
||||
cpustat[CPUTIME_IDLE] += (__force u64) cputime;
|
||||
cpustat[CPUTIME_IDLE] += cputime_to_nsecs(cputime);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -312,10 +306,7 @@ static inline cputime_t account_other_time(cputime_t max)
|
||||
accounted = steal_account_process_time(max);
|
||||
|
||||
if (accounted < max)
|
||||
accounted += irqtime_account_hi_update(max - accounted);
|
||||
|
||||
if (accounted < max)
|
||||
accounted += irqtime_account_si_update(max - accounted);
|
||||
accounted += irqtime_tick_accounted(max - accounted);
|
||||
|
||||
return accounted;
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
#include <linux/sched/rt.h>
|
||||
#include <linux/u64_stats_sync.h>
|
||||
#include <linux/sched/deadline.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/binfmts.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/spinlock.h>
|
||||
@@ -1955,14 +1956,19 @@ static inline void nohz_balance_exit_idle(unsigned int cpu) { }
|
||||
|
||||
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
|
||||
struct irqtime {
|
||||
u64 hardirq_time;
|
||||
u64 softirq_time;
|
||||
u64 total;
|
||||
u64 tick_delta;
|
||||
u64 irq_start_time;
|
||||
struct u64_stats_sync sync;
|
||||
};
|
||||
|
||||
DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
|
||||
|
||||
/*
|
||||
* Returns the irqtime minus the softirq time computed by ksoftirqd.
|
||||
* Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime
|
||||
* and never move forward.
|
||||
*/
|
||||
static inline u64 irq_time_read(int cpu)
|
||||
{
|
||||
struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
|
||||
@@ -1971,7 +1977,7 @@ static inline u64 irq_time_read(int cpu)
|
||||
|
||||
do {
|
||||
seq = __u64_stats_fetch_begin(&irqtime->sync);
|
||||
total = irqtime->softirq_time + irqtime->hardirq_time;
|
||||
total = irqtime->total;
|
||||
} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
|
||||
|
||||
return total;
|
||||
|
||||
@@ -1445,7 +1445,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
|
||||
|
||||
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
|
||||
unsigned long new_addr, unsigned long old_end,
|
||||
pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
|
||||
pmd_t *old_pmd, pmd_t *new_pmd)
|
||||
{
|
||||
spinlock_t *old_ptl, *new_ptl;
|
||||
pmd_t pmd;
|
||||
@@ -1476,7 +1476,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
|
||||
if (new_ptl != old_ptl)
|
||||
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
|
||||
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
|
||||
if (pmd_present(pmd) && pmd_dirty(pmd))
|
||||
if (pmd_present(pmd))
|
||||
force_flush = true;
|
||||
VM_BUG_ON(!pmd_none(*new_pmd));
|
||||
|
||||
@@ -1487,12 +1487,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
|
||||
pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
|
||||
}
|
||||
set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
|
||||
if (new_ptl != old_ptl)
|
||||
spin_unlock(new_ptl);
|
||||
if (force_flush)
|
||||
flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
|
||||
else
|
||||
*need_flush = true;
|
||||
if (new_ptl != old_ptl)
|
||||
spin_unlock(new_ptl);
|
||||
spin_unlock(old_ptl);
|
||||
return true;
|
||||
}
|
||||
|
||||
mm/mremap.c
@@ -104,7 +104,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
|
||||
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
|
||||
unsigned long old_addr, unsigned long old_end,
|
||||
struct vm_area_struct *new_vma, pmd_t *new_pmd,
|
||||
unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
|
||||
unsigned long new_addr, bool need_rmap_locks)
|
||||
{
|
||||
struct mm_struct *mm = vma->vm_mm;
|
||||
pte_t *old_pte, *new_pte, pte;
|
||||
@@ -152,15 +152,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
|
||||
|
||||
pte = ptep_get_and_clear(mm, old_addr, old_pte);
|
||||
/*
|
||||
* If we are remapping a dirty PTE, make sure
|
||||
* If we are remapping a valid PTE, make sure
|
||||
* to flush TLB before we drop the PTL for the
|
||||
* old PTE or we may race with page_mkclean().
|
||||
* PTE.
|
||||
*
|
||||
* This check has to be done after we removed the
|
||||
* old PTE from page tables or another thread may
|
||||
* dirty it after the check and before the removal.
|
||||
* NOTE! Both old and new PTL matter: the old one
|
||||
* for racing with page_mkclean(), the new one to
|
||||
* make sure the physical page stays valid until
|
||||
* the TLB entry for the old mapping has been
|
||||
* flushed.
|
||||
*/
|
||||
if (pte_present(pte) && pte_dirty(pte))
|
||||
if (pte_present(pte))
|
||||
force_flush = true;
|
||||
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
|
||||
pte = move_soft_dirty_pte(pte);
|
||||
@@ -168,13 +170,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
|
||||
}
|
||||
|
||||
arch_leave_lazy_mmu_mode();
|
||||
if (force_flush)
|
||||
flush_tlb_range(vma, old_end - len, old_end);
|
||||
if (new_ptl != old_ptl)
|
||||
spin_unlock(new_ptl);
|
||||
pte_unmap(new_pte - 1);
|
||||
if (force_flush)
|
||||
flush_tlb_range(vma, old_end - len, old_end);
|
||||
else
|
||||
*need_flush = true;
|
||||
pte_unmap_unlock(old_pte - 1, old_ptl);
|
||||
if (need_rmap_locks)
|
||||
drop_rmap_locks(vma);
|
||||
@@ -189,7 +189,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
|
||||
{
|
||||
unsigned long extent, next, old_end;
|
||||
pmd_t *old_pmd, *new_pmd;
|
||||
bool need_flush = false;
|
||||
unsigned long mmun_start; /* For mmu_notifiers */
|
||||
unsigned long mmun_end; /* For mmu_notifiers */
|
||||
|
||||
@@ -220,8 +219,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
|
||||
if (need_rmap_locks)
|
||||
take_rmap_locks(vma);
|
||||
moved = move_huge_pmd(vma, old_addr, new_addr,
|
||||
old_end, old_pmd, new_pmd,
|
||||
&need_flush);
|
||||
old_end, old_pmd, new_pmd);
|
||||
if (need_rmap_locks)
|
||||
drop_rmap_locks(vma);
|
||||
if (moved)
|
||||
@@ -239,10 +237,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
|
||||
if (extent > LATENCY_LIMIT)
|
||||
extent = LATENCY_LIMIT;
|
||||
move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
|
||||
new_pmd, new_addr, need_rmap_locks, &need_flush);
|
||||
new_pmd, new_addr, need_rmap_locks);
|
||||
}
|
||||
if (need_flush)
|
||||
flush_tlb_range(vma, old_end-len, old_addr);
|
||||
|
||||
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
|
||||
|
||||
|
||||
@@ -243,6 +243,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
|
||||
struct batadv_priv *bat_priv;
|
||||
struct sk_buff *skb;
|
||||
u32 elp_interval;
|
||||
bool ret;
|
||||
|
||||
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
|
||||
hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
|
||||
@@ -304,8 +305,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
|
||||
* may sleep and that is not allowed in an rcu protected
|
||||
* context. Therefore schedule a task for that.
|
||||
*/
|
||||
queue_work(batadv_event_workqueue,
|
||||
&hardif_neigh->bat_v.metric_work);
|
||||
ret = queue_work(batadv_event_workqueue,
|
||||
&hardif_neigh->bat_v.metric_work);
|
||||
|
||||
if (!ret)
|
||||
batadv_hardif_neigh_put(hardif_neigh);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
|
||||
@@ -1767,6 +1767,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
|
||||
{
|
||||
struct batadv_bla_backbone_gw *backbone_gw;
|
||||
struct ethhdr *ethhdr;
|
||||
bool ret;
|
||||
|
||||
ethhdr = eth_hdr(skb);
|
||||
|
||||
@@ -1790,8 +1791,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
|
||||
if (unlikely(!backbone_gw))
|
||||
return true;
|
||||
|
||||
queue_work(batadv_event_workqueue, &backbone_gw->report_work);
|
||||
/* backbone_gw is unreferenced in the report work function function */
|
||||
ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
|
||||
|
||||
/* backbone_gw is unreferenced in the report work function function
|
||||
* if queue_work() call was successful
|
||||
*/
|
||||
if (!ret)
|
||||
batadv_backbone_gw_put(backbone_gw);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -845,24 +845,6 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
|
||||
spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
|
||||
struct list_head *list;
|
||||
|
||||
/* Check if nc_node is already added */
|
||||
nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
|
||||
|
||||
/* Node found */
|
||||
if (nc_node)
|
||||
return nc_node;
|
||||
|
||||
nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
|
||||
if (!nc_node)
|
||||
return NULL;
|
||||
|
||||
/* Initialize nc_node */
|
||||
INIT_LIST_HEAD(&nc_node->list);
|
||||
kref_init(&nc_node->refcount);
|
||||
ether_addr_copy(nc_node->addr, orig_node->orig);
|
||||
kref_get(&orig_neigh_node->refcount);
|
||||
nc_node->orig_node = orig_neigh_node;
|
||||
|
||||
/* Select ingoing or outgoing coding node */
|
||||
if (in_coding) {
|
||||
lock = &orig_neigh_node->in_coding_list_lock;
|
||||
@@ -872,13 +854,34 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
|
||||
list = &orig_neigh_node->out_coding_list;
|
||||
}
|
||||
|
||||
spin_lock_bh(lock);
|
||||
|
||||
/* Check if nc_node is already added */
|
||||
nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
|
||||
|
||||
/* Node found */
|
||||
if (nc_node)
|
||||
goto unlock;
|
||||
|
||||
nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
|
||||
if (!nc_node)
|
||||
goto unlock;
|
||||
|
||||
/* Initialize nc_node */
|
||||
INIT_LIST_HEAD(&nc_node->list);
|
||||
kref_init(&nc_node->refcount);
|
||||
ether_addr_copy(nc_node->addr, orig_node->orig);
|
||||
kref_get(&orig_neigh_node->refcount);
|
||||
nc_node->orig_node = orig_neigh_node;
|
||||
|
||||
batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
|
||||
nc_node->addr, nc_node->orig_node->orig);
|
||||
|
||||
/* Add nc_node to orig_node */
|
||||
spin_lock_bh(lock);
|
||||
kref_get(&nc_node->refcount);
|
||||
list_add_tail_rcu(&nc_node->list, list);
|
||||
|
||||
unlock:
|
||||
spin_unlock_bh(lock);
|
||||
|
||||
return nc_node;
|
||||
|
||||
@@ -565,15 +565,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
|
||||
struct batadv_softif_vlan *vlan;
|
||||
int err;
|
||||
|
||||
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
|
||||
|
||||
vlan = batadv_softif_vlan_get(bat_priv, vid);
|
||||
if (vlan) {
|
||||
batadv_softif_vlan_put(vlan);
|
||||
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
|
||||
if (!vlan)
|
||||
if (!vlan) {
|
||||
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
vlan->bat_priv = bat_priv;
|
||||
vlan->vid = vid;
|
||||
@@ -581,17 +586,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
|
||||
|
||||
atomic_set(&vlan->ap_isolation, 0);
|
||||
|
||||
err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
|
||||
if (err) {
|
||||
kfree(vlan);
|
||||
return err;
|
||||
}
|
||||
|
||||
spin_lock_bh(&bat_priv->softif_vlan_list_lock);
|
||||
kref_get(&vlan->refcount);
|
||||
hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
|
||||
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
|
||||
|
||||
/* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
|
||||
* sleeping behavior of the sysfs functions and the fs_reclaim lock
|
||||
*/
|
||||
err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
|
||||
if (err) {
|
||||
/* ref for the function */
|
||||
batadv_softif_vlan_put(vlan);
|
||||
|
||||
/* ref for the list */
|
||||
batadv_softif_vlan_put(vlan);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* add a new TT local entry. This one will be marked with the NOPURGE
|
||||
* flag
|
||||
*/
|
||||
|
||||
@@ -187,7 +187,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
|
||||
\
|
||||
return __batadv_store_uint_attr(buff, count, _min, _max, \
|
||||
_post_func, attr, \
|
||||
&bat_priv->_var, net_dev); \
|
||||
&bat_priv->_var, net_dev, \
|
||||
NULL); \
|
||||
}
|
||||
|
||||
#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
|
||||
@@ -261,7 +262,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
|
||||
\
|
||||
length = __batadv_store_uint_attr(buff, count, _min, _max, \
|
||||
_post_func, attr, \
|
||||
&hard_iface->_var, net_dev); \
|
||||
&hard_iface->_var, \
|
||||
hard_iface->soft_iface, \
|
||||
net_dev); \
|
||||
\
|
||||
batadv_hardif_put(hard_iface); \
|
||||
return length; \
|
||||
@@ -355,10 +358,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
|
||||
|
||||
static int batadv_store_uint_attr(const char *buff, size_t count,
|
||||
struct net_device *net_dev,
|
||||
struct net_device *slave_dev,
|
||||
const char *attr_name,
|
||||
unsigned int min, unsigned int max,
|
||||
atomic_t *attr)
|
||||
{
|
||||
char ifname[IFNAMSIZ + 3] = "";
|
||||
unsigned long uint_val;
|
||||
int ret;
|
||||
|
||||
@@ -384,8 +389,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
|
||||
if (atomic_read(attr) == uint_val)
|
||||
return count;
|
||||
|
||||
batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
|
||||
attr_name, atomic_read(attr), uint_val);
|
||||
if (slave_dev)
|
||||
snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
|
||||
|
||||
batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
|
||||
attr_name, ifname, atomic_read(attr), uint_val);
|
||||
|
||||
atomic_set(attr, uint_val);
|
||||
return count;
|
||||
@@ -396,12 +404,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
|
||||
void (*post_func)(struct net_device *),
|
||||
const struct attribute *attr,
|
||||
atomic_t *attr_store,
|
||||
struct net_device *net_dev)
|
||||
struct net_device *net_dev,
|
||||
struct net_device *slave_dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
|
||||
attr_store);
|
||||
ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
|
||||
attr->name, min, max, attr_store);
|
||||
if (post_func && ret)
|
||||
post_func(net_dev);
|
||||
|
||||
@@ -570,7 +579,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
|
||||
return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
|
||||
batadv_post_gw_reselect, attr,
|
||||
&bat_priv->gw.sel_class,
|
||||
bat_priv->soft_iface);
|
||||
bat_priv->soft_iface, NULL);
|
||||
}
|
||||
|
||||
static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
|
||||
@@ -1084,8 +1093,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
|
||||
if (old_tp_override == tp_override)
|
||||
goto out;
|
||||
|
||||
batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
|
||||
"throughput_override",
|
||||
batadv_info(hard_iface->soft_iface,
|
||||
"%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
|
||||
"throughput_override", net_dev->name,
|
||||
old_tp_override / 10, old_tp_override % 10,
|
||||
tp_override / 10, tp_override % 10);
|
||||
|
||||
|
||||
@@ -1550,6 +1550,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
|
||||
{
|
||||
struct batadv_tt_orig_list_entry *orig_entry;
|
||||
|
||||
spin_lock_bh(&tt_global->list_lock);
|
||||
|
||||
orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
|
||||
if (orig_entry) {
|
||||
/* refresh the ttvn: the current value could be a bogus one that
|
||||
@@ -1570,16 +1572,16 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
|
||||
orig_entry->ttvn = ttvn;
|
||||
kref_init(&orig_entry->refcount);
|
||||
|
||||
spin_lock_bh(&tt_global->list_lock);
|
||||
kref_get(&orig_entry->refcount);
|
||||
hlist_add_head_rcu(&orig_entry->list,
|
||||
&tt_global->orig_list);
|
||||
spin_unlock_bh(&tt_global->list_lock);
|
||||
atomic_inc(&tt_global->orig_list_count);
|
||||
|
||||
out:
|
||||
if (orig_entry)
|
||||
batadv_tt_orig_list_entry_put(orig_entry);
|
||||
|
||||
spin_unlock_bh(&tt_global->list_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -528,15 +528,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
|
||||
{
|
||||
struct batadv_tvlv_handler *tvlv_handler;
|
||||
|
||||
spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
|
||||
|
||||
tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
|
||||
if (tvlv_handler) {
|
||||
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
|
||||
batadv_tvlv_handler_put(tvlv_handler);
|
||||
return;
|
||||
}
|
||||
|
||||
tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
|
||||
if (!tvlv_handler)
|
||||
if (!tvlv_handler) {
|
||||
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
tvlv_handler->ogm_handler = optr;
|
||||
tvlv_handler->unicast_handler = uptr;
|
||||
@@ -546,7 +551,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
|
||||
kref_init(&tvlv_handler->refcount);
|
||||
INIT_HLIST_NODE(&tvlv_handler->list);
|
||||
|
||||
spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
|
||||
kref_get(&tvlv_handler->refcount);
|
||||
hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
|
||||
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
|
||||
|
||||
@@ -421,7 +421,7 @@ nf_nat_setup_info(struct nf_conn *ct,
|
||||
else
|
||||
ct->status |= IPS_DST_NAT;
|
||||
|
||||
if (nfct_help(ct))
|
||||
if (nfct_help(ct) && !nfct_seqadj(ct))
|
||||
if (!nfct_seqadj_ext_add(ct))
|
||||
return NF_DROP;
|
||||
}
|
||||
|
||||