Merge 4.14.88 into android-4.14-p
Changes in 4.14.88
media: omap3isp: Unregister media device as first
iommu/vt-d: Fix NULL pointer dereference in prq_event_thread()
brcmutil: really fix decoding channel info for 160 MHz bandwidth
iommu/ipmmu-vmsa: Fix crash on early domain free
can: rcar_can: Fix erroneous registration
test_firmware: fix error return getting clobbered
HID: input: Ignore battery reported by Symbol DS4308
batman-adv: Use explicit tvlv padding for ELP packets
batman-adv: Expand merged fragment buffer for full packet
amd/iommu: Fix Guest Virtual APIC Log Tail Address Register
bnx2x: Assign unique DMAE channel number for FW DMAE transactions.
qed: Fix PTT leak in qed_drain()
qed: Fix reading wrong value in loop condition
Revert "usb: gadget: ffs: Fix BUG when userland exits with submitted AIO transfers"
net/mlx4_core: Zero out lkey field in SW2HW_MPT fw command
net/mlx4_core: Fix uninitialized variable compilation warning
net/mlx4: Fix UBSAN warning of signed integer overflow
gpio: mockup: fix indicated direction
mtd: rawnand: qcom: Namespace prefix some commands
HID: multitouch: Add pointstick support for Cirque Touchpad
mtd: spi-nor: Fix Cadence QSPI page fault kernel panic
qed: Fix bitmap_weight() check
qed: Fix QM getters to always return a valid pq
net: faraday: ftmac100: remove netif_running(netdev) check before disabling interrupts
iommu/vt-d: Use memunmap to free memremap
flexfiles: use per-mirror specified stateid for IO
ibmvnic: Fix RX queue buffer cleanup
team: no need to do team_notify_peers or team_mcast_rejoin when disabling port
net: amd: add missing of_node_put()
mm: don't warn about allocations which stall for too long
usb: quirk: add no-LPM quirk on SanDisk Ultra Flair device
usb: appledisplay: Add 27" Apple Cinema Display
USB: check usb_get_extra_descriptor for proper size
ALSA: usb-audio: Fix UAF decrement if card has no live interfaces in card.c
ALSA: hda: Add support for AMD Stoney Ridge
ALSA: pcm: Fix starvation on down_write_nonblock()
ALSA: pcm: Call snd_pcm_unlink() conditionally at closing
ALSA: pcm: Fix interval evaluation with openmin/max
ALSA: hda/realtek - Fix speaker output regression on Thinkpad T570
virtio/s390: avoid race on vcdev->config
virtio/s390: fix race in ccw_io_helper()
vhost/vsock: fix use-after-free in network stack callers
SUNRPC: Fix leak of krb5p encode pages
dmaengine: dw: Fix FIFO size for Intel Merrifield
dmaengine: cppi41: delete channel from pending list when stop channel
ARM: 8806/1: kprobes: Fix false positive with FORTIFY_SOURCE
xhci: workaround CSS timeout on AMD SNPS 3.0 xHC
xhci: Prevent U1/U2 link pm states if exit latency is too long
f2fs: fix to do sanity check with block address in main area v2
swiotlb: clean up reporting
Staging: lustre: remove two build warnings
staging: atomisp: remove "fun" strncpy warning
cifs: Fix separator when building path from dentry
staging: rtl8712: Fix possible buffer overrun
Revert commit ef9209b642 "staging: rtl8723bs: Fix indenting errors and an off-by-one mistake in core/rtw_mlme_ext.c"
drm/amdgpu: update mc firmware image for polaris12 variants
drm/amdgpu/gmc8: update MC firmware for polaris
Drivers: hv: vmbus: Offload the handling of channels to two workqueues
tty: serial: 8250_mtk: always resume the device in probe.
tty: do not set TTY_IO_ERROR flag if console port
kgdboc: fix KASAN global-out-of-bounds bug in param_set_kgdboc_var()
libnvdimm, pfn: Pad pfn namespaces relative to other regions
mac80211_hwsim: Timer should be initialized before device registered
mac80211: Clear beacon_int in ieee80211_do_stop
mac80211: ignore tx status for PS stations in ieee80211_tx_status_ext
mac80211: fix reordering of buffered broadcast packets
mac80211: ignore NullFunc frames in the duplicate detection
Linux 4.14.88
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 87
+SUBLEVEL = 88
 EXTRAVERSION =
 NAME = Petit Gorille

@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
 	}
 
 	/* Copy arch-dep-instance from template. */
-	memcpy(code, &optprobe_template_entry,
+	memcpy(code, (unsigned char *)optprobe_template_entry,
 	       TMPL_END_IDX * sizeof(kprobe_opcode_t));
 
 	/* Adjust buffer according to instruction. */

@@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 
 	desc_phys = lower_32_bits(c->desc_phys);
 	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
-	if (!cdd->chan_busy[desc_num])
+	if (!cdd->chan_busy[desc_num]) {
+		struct cppi41_channel *cc, *_ct;
+
+		/*
+		 * channels might still be in the pendling list if
+		 * cppi41_dma_issue_pending() is called after
+		 * cppi41_runtime_suspend() is called
+		 */
+		list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
+			if (cc != c)
+				continue;
+			list_del(&cc->node);
+			break;
+		}
 		return 0;
+	}
 
 	ret = cppi41_tear_down_chan(c);
 	if (ret)

@@ -1064,12 +1064,12 @@ static void dwc_issue_pending(struct dma_chan *chan)
 /*
  * Program FIFO size of channels.
  *
- * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
+ * By default full FIFO (512 bytes) is assigned to channel 0. Here we
  * slice FIFO on equal parts between channels.
  */
 static void idma32_fifo_partition(struct dw_dma *dw)
 {
-	u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
+	u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
 		    IDMA32C_FP_UPDATE;
 	u64 fifo_partition = 0;
 
@@ -1082,7 +1082,7 @@ static void idma32_fifo_partition(struct dw_dma *dw)
 	/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
 	fifo_partition |= value << 32;
 
-	/* Program FIFO Partition registers - 128 bytes for each channel */
+	/* Program FIFO Partition registers - 64 bytes per channel */
 	idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
 	idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
 }

@@ -35,8 +35,8 @@
 #define GPIO_MOCKUP_MAX_RANGES	(GPIO_MOCKUP_MAX_GC * 2)
 
 enum {
-	GPIO_MOCKUP_DIR_OUT = 0,
-	GPIO_MOCKUP_DIR_IN = 1,
+	GPIO_MOCKUP_DIR_IN = 0,
+	GPIO_MOCKUP_DIR_OUT = 1,
 };
 
 /*
@@ -112,7 +112,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
 {
 	struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
 
-	return chip->lines[offset].dir;
+	return !chip->lines[offset].dir;
 }
 
 static int gpio_mockup_name_lines(struct device *dev,

@@ -52,6 +52,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
|
||||
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
|
||||
|
||||
static const u32 golden_settings_tonga_a11[] =
|
||||
{
|
||||
@@ -219,13 +222,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
|
||||
chip_name = "tonga";
|
||||
break;
|
||||
case CHIP_POLARIS11:
|
||||
chip_name = "polaris11";
|
||||
if (((adev->pdev->device == 0x67ef) &&
|
||||
((adev->pdev->revision == 0xe0) ||
|
||||
(adev->pdev->revision == 0xe5))) ||
|
||||
((adev->pdev->device == 0x67ff) &&
|
||||
((adev->pdev->revision == 0xcf) ||
|
||||
(adev->pdev->revision == 0xef) ||
|
||||
(adev->pdev->revision == 0xff))))
|
||||
chip_name = "polaris11_k";
|
||||
else if ((adev->pdev->device == 0x67ef) &&
|
||||
(adev->pdev->revision == 0xe2))
|
||||
chip_name = "polaris11_k";
|
||||
else
|
||||
chip_name = "polaris11";
|
||||
break;
|
||||
case CHIP_POLARIS10:
|
||||
chip_name = "polaris10";
|
||||
if ((adev->pdev->device == 0x67df) &&
|
||||
((adev->pdev->revision == 0xe1) ||
|
||||
(adev->pdev->revision == 0xf7)))
|
||||
chip_name = "polaris10_k";
|
||||
else
|
||||
chip_name = "polaris10";
|
||||
break;
|
||||
case CHIP_POLARIS12:
|
||||
chip_name = "polaris12";
|
||||
if (((adev->pdev->device == 0x6987) &&
|
||||
((adev->pdev->revision == 0xc0) ||
|
||||
(adev->pdev->revision == 0xc3))) ||
|
||||
((adev->pdev->device == 0x6981) &&
|
||||
((adev->pdev->revision == 0x00) ||
|
||||
(adev->pdev->revision == 0x01) ||
|
||||
(adev->pdev->revision == 0x10))))
|
||||
chip_name = "polaris12_k";
|
||||
else
|
||||
chip_name = "polaris12";
|
||||
break;
|
||||
case CHIP_FIJI:
|
||||
case CHIP_CARRIZO:
|
||||
|
||||
@@ -266,6 +266,9 @@
 
 #define USB_VENDOR_ID_CIDC		0x1677
 
+#define I2C_VENDOR_ID_CIRQUE		0x0488
+#define I2C_PRODUCT_ID_CIRQUE_121F	0x121F
+
 #define USB_VENDOR_ID_CJTOUCH		0x24b8
 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020	0x0020
 #define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040	0x0040

@@ -1001,6 +1004,7 @@
 #define USB_VENDOR_ID_SYMBOL		0x05e0
 #define USB_DEVICE_ID_SYMBOL_SCANNER_1	0x0800
 #define USB_DEVICE_ID_SYMBOL_SCANNER_2	0x1300
+#define USB_DEVICE_ID_SYMBOL_SCANNER_3	0x1200
 
 #define USB_VENDOR_ID_SYNAPTICS		0x06cb
 #define USB_DEVICE_ID_SYNAPTICS_TP	0x0001

@@ -325,6 +325,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM,
 		USB_DEVICE_ID_ELECOM_BM084),
 	  HID_BATTERY_QUIRK_IGNORE },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL,
+		USB_DEVICE_ID_SYMBOL_SCANNER_3),
+	  HID_BATTERY_QUIRK_IGNORE },
 	{}
 };
 
@@ -1474,6 +1474,12 @@ static const struct hid_device_id mt_devices[] = {
 		MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
 			USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
 
+	/* Cirque devices */
+	{ .driver_data = MT_CLS_WIN_8_DUAL,
+		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+			I2C_VENDOR_ID_CIRQUE,
+			I2C_PRODUCT_ID_CIRQUE_121F) },
+
 	/* CJTouch panels */
 	{ .driver_data = MT_CLS_NSMU,
 		MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,

@@ -444,61 +444,16 @@ void vmbus_free_channels(void)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* vmbus_process_offer - Process the offer by creating a channel/device
|
||||
* associated with this offer
|
||||
*/
|
||||
static void vmbus_process_offer(struct vmbus_channel *newchannel)
|
||||
/* Note: the function can run concurrently for primary/sub channels. */
|
||||
static void vmbus_add_channel_work(struct work_struct *work)
|
||||
{
|
||||
struct vmbus_channel *channel;
|
||||
bool fnew = true;
|
||||
struct vmbus_channel *newchannel =
|
||||
container_of(work, struct vmbus_channel, add_channel_work);
|
||||
struct vmbus_channel *primary_channel = newchannel->primary_channel;
|
||||
unsigned long flags;
|
||||
u16 dev_type;
|
||||
int ret;
|
||||
|
||||
/* Make sure this is a new offer */
|
||||
mutex_lock(&vmbus_connection.channel_mutex);
|
||||
|
||||
/*
|
||||
* Now that we have acquired the channel_mutex,
|
||||
* we can release the potentially racing rescind thread.
|
||||
*/
|
||||
atomic_dec(&vmbus_connection.offer_in_progress);
|
||||
|
||||
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
|
||||
if (!uuid_le_cmp(channel->offermsg.offer.if_type,
|
||||
newchannel->offermsg.offer.if_type) &&
|
||||
!uuid_le_cmp(channel->offermsg.offer.if_instance,
|
||||
newchannel->offermsg.offer.if_instance)) {
|
||||
fnew = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (fnew)
|
||||
list_add_tail(&newchannel->listentry,
|
||||
&vmbus_connection.chn_list);
|
||||
|
||||
mutex_unlock(&vmbus_connection.channel_mutex);
|
||||
|
||||
if (!fnew) {
|
||||
/*
|
||||
* Check to see if this is a sub-channel.
|
||||
*/
|
||||
if (newchannel->offermsg.offer.sub_channel_index != 0) {
|
||||
/*
|
||||
* Process the sub-channel.
|
||||
*/
|
||||
newchannel->primary_channel = channel;
|
||||
spin_lock_irqsave(&channel->lock, flags);
|
||||
list_add_tail(&newchannel->sc_list, &channel->sc_list);
|
||||
channel->num_sc++;
|
||||
spin_unlock_irqrestore(&channel->lock, flags);
|
||||
} else {
|
||||
goto err_free_chan;
|
||||
}
|
||||
}
|
||||
|
||||
dev_type = hv_get_dev_type(newchannel);
|
||||
|
||||
init_vp_index(newchannel, dev_type);
|
||||
@@ -516,21 +471,22 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
|
||||
/*
|
||||
* This state is used to indicate a successful open
|
||||
* so that when we do close the channel normally, we
|
||||
* can cleanup properly
|
||||
* can cleanup properly.
|
||||
*/
|
||||
newchannel->state = CHANNEL_OPEN_STATE;
|
||||
|
||||
if (!fnew) {
|
||||
if (channel->sc_creation_callback != NULL)
|
||||
channel->sc_creation_callback(newchannel);
|
||||
if (primary_channel != NULL) {
|
||||
/* newchannel is a sub-channel. */
|
||||
|
||||
if (primary_channel->sc_creation_callback != NULL)
|
||||
primary_channel->sc_creation_callback(newchannel);
|
||||
|
||||
newchannel->probe_done = true;
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Start the process of binding this offer to the driver
|
||||
* We need to set the DeviceObject field before calling
|
||||
* vmbus_child_dev_add()
|
||||
* Start the process of binding the primary channel to the driver
|
||||
*/
|
||||
newchannel->device_obj = vmbus_device_create(
|
||||
&newchannel->offermsg.offer.if_type,
|
||||
@@ -559,13 +515,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
|
||||
|
||||
err_deq_chan:
|
||||
mutex_lock(&vmbus_connection.channel_mutex);
|
||||
list_del(&newchannel->listentry);
|
||||
|
||||
/*
|
||||
* We need to set the flag, otherwise
|
||||
* vmbus_onoffer_rescind() can be blocked.
|
||||
*/
|
||||
newchannel->probe_done = true;
|
||||
|
||||
if (primary_channel == NULL) {
|
||||
list_del(&newchannel->listentry);
|
||||
} else {
|
||||
spin_lock_irqsave(&primary_channel->lock, flags);
|
||||
list_del(&newchannel->sc_list);
|
||||
spin_unlock_irqrestore(&primary_channel->lock, flags);
|
||||
}
|
||||
|
||||
mutex_unlock(&vmbus_connection.channel_mutex);
|
||||
|
||||
if (newchannel->target_cpu != get_cpu()) {
|
||||
put_cpu();
|
||||
smp_call_function_single(newchannel->target_cpu,
|
||||
percpu_channel_deq, newchannel, true);
|
||||
percpu_channel_deq,
|
||||
newchannel, true);
|
||||
} else {
|
||||
percpu_channel_deq(newchannel);
|
||||
put_cpu();
|
||||
@@ -573,14 +544,104 @@ err_deq_chan:
|
||||
|
||||
vmbus_release_relid(newchannel->offermsg.child_relid);
|
||||
|
||||
err_free_chan:
|
||||
free_channel(newchannel);
|
||||
}
|
||||
|
||||
/*
|
||||
* vmbus_process_offer - Process the offer by creating a channel/device
|
||||
* associated with this offer
|
||||
*/
|
||||
static void vmbus_process_offer(struct vmbus_channel *newchannel)
|
||||
{
|
||||
struct vmbus_channel *channel;
|
||||
struct workqueue_struct *wq;
|
||||
unsigned long flags;
|
||||
bool fnew = true;
|
||||
|
||||
mutex_lock(&vmbus_connection.channel_mutex);
|
||||
|
||||
/*
|
||||
* Now that we have acquired the channel_mutex,
|
||||
* we can release the potentially racing rescind thread.
|
||||
*/
|
||||
atomic_dec(&vmbus_connection.offer_in_progress);
|
||||
|
||||
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
|
||||
if (!uuid_le_cmp(channel->offermsg.offer.if_type,
|
||||
newchannel->offermsg.offer.if_type) &&
|
||||
!uuid_le_cmp(channel->offermsg.offer.if_instance,
|
||||
newchannel->offermsg.offer.if_instance)) {
|
||||
fnew = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (fnew)
|
||||
list_add_tail(&newchannel->listentry,
|
||||
&vmbus_connection.chn_list);
|
||||
else {
|
||||
/*
|
||||
* Check to see if this is a valid sub-channel.
|
||||
*/
|
||||
if (newchannel->offermsg.offer.sub_channel_index == 0) {
|
||||
mutex_unlock(&vmbus_connection.channel_mutex);
|
||||
/*
|
||||
* Don't call free_channel(), because newchannel->kobj
|
||||
* is not initialized yet.
|
||||
*/
|
||||
kfree(newchannel);
|
||||
WARN_ON_ONCE(1);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* Process the sub-channel.
|
||||
*/
|
||||
newchannel->primary_channel = channel;
|
||||
spin_lock_irqsave(&channel->lock, flags);
|
||||
list_add_tail(&newchannel->sc_list, &channel->sc_list);
|
||||
spin_unlock_irqrestore(&channel->lock, flags);
|
||||
}
|
||||
|
||||
mutex_unlock(&vmbus_connection.channel_mutex);
|
||||
|
||||
/*
|
||||
* vmbus_process_offer() mustn't call channel->sc_creation_callback()
|
||||
* directly for sub-channels, because sc_creation_callback() ->
|
||||
* vmbus_open() may never get the host's response to the
|
||||
* OPEN_CHANNEL message (the host may rescind a channel at any time,
|
||||
* e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
|
||||
* may not wake up the vmbus_open() as it's blocked due to a non-zero
|
||||
* vmbus_connection.offer_in_progress, and finally we have a deadlock.
|
||||
*
|
||||
* The above is also true for primary channels, if the related device
|
||||
* drivers use sync probing mode by default.
|
||||
*
|
||||
* And, usually the handling of primary channels and sub-channels can
|
||||
* depend on each other, so we should offload them to different
|
||||
* workqueues to avoid possible deadlock, e.g. in sync-probing mode,
|
||||
* NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
|
||||
* rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
|
||||
* and waits for all the sub-channels to appear, but the latter
|
||||
* can't get the rtnl_lock and this blocks the handling of
|
||||
* sub-channels.
|
||||
*/
|
||||
INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
|
||||
wq = fnew ? vmbus_connection.handle_primary_chan_wq :
|
||||
vmbus_connection.handle_sub_chan_wq;
|
||||
queue_work(wq, &newchannel->add_channel_work);
|
||||
}
|
||||
|
||||
/*
|
||||
* We use this state to statically distribute the channel interrupt load.
|
||||
*/
|
||||
static int next_numa_node_id;
|
||||
/*
|
||||
* init_vp_index() accesses global variables like next_numa_node_id, and
|
||||
* it can run concurrently for primary channels and sub-channels: see
|
||||
* vmbus_process_offer(), so we need the lock to protect the global
|
||||
* variables.
|
||||
*/
|
||||
static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
|
||||
|
||||
/*
|
||||
* Starting with Win8, we can statically distribute the incoming
|
||||
@@ -618,6 +679,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock(&bind_channel_to_cpu_lock);
|
||||
|
||||
/*
|
||||
* Based on the channel affinity policy, we will assign the NUMA
|
||||
* nodes.
|
||||
@@ -700,6 +763,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
|
||||
channel->target_cpu = cur_cpu;
|
||||
channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
|
||||
|
||||
spin_unlock(&bind_channel_to_cpu_lock);
|
||||
|
||||
free_cpumask_var(available_mask);
|
||||
}
|
||||
|
||||
|
||||
@@ -161,6 +161,20 @@ int vmbus_connect(void)
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
vmbus_connection.handle_primary_chan_wq =
|
||||
create_workqueue("hv_pri_chan");
|
||||
if (!vmbus_connection.handle_primary_chan_wq) {
|
||||
ret = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
vmbus_connection.handle_sub_chan_wq =
|
||||
create_workqueue("hv_sub_chan");
|
||||
if (!vmbus_connection.handle_sub_chan_wq) {
|
||||
ret = -ENOMEM;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&vmbus_connection.chn_msg_list);
|
||||
spin_lock_init(&vmbus_connection.channelmsg_lock);
|
||||
|
||||
@@ -251,10 +265,14 @@ void vmbus_disconnect(void)
|
||||
*/
|
||||
vmbus_initiate_unload(false);
|
||||
|
||||
if (vmbus_connection.work_queue) {
|
||||
drain_workqueue(vmbus_connection.work_queue);
|
||||
if (vmbus_connection.handle_sub_chan_wq)
|
||||
destroy_workqueue(vmbus_connection.handle_sub_chan_wq);
|
||||
|
||||
if (vmbus_connection.handle_primary_chan_wq)
|
||||
destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
|
||||
|
||||
if (vmbus_connection.work_queue)
|
||||
destroy_workqueue(vmbus_connection.work_queue);
|
||||
}
|
||||
|
||||
if (vmbus_connection.int_page) {
|
||||
free_pages((unsigned long)vmbus_connection.int_page, 0);
|
||||
|
||||
@@ -327,7 +327,14 @@ struct vmbus_connection {
|
||||
struct list_head chn_list;
|
||||
struct mutex channel_mutex;
|
||||
|
||||
/*
|
||||
* An offer message is handled first on the work_queue, and then
|
||||
* is further handled on handle_primary_chan_wq or
|
||||
* handle_sub_chan_wq.
|
||||
*/
|
||||
struct workqueue_struct *work_queue;
|
||||
struct workqueue_struct *handle_primary_chan_wq;
|
||||
struct workqueue_struct *handle_sub_chan_wq;
|
||||
};
|
||||
|
||||
|
||||
|
||||
@@ -796,7 +796,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 	entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
 		    &entry, sizeof(entry));
-	entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
+	entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+		 (BIT_ULL(52)-1)) & ~7ULL;
 	memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
 		    &entry, sizeof(entry));
 	writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);

@@ -3086,7 +3086,7 @@ static int copy_context_table(struct intel_iommu *iommu,
 	}
 
 	if (old_ce)
-		iounmap(old_ce);
+		memunmap(old_ce);
 
 	ret = 0;
 	if (devfn < 0x80)

@@ -589,7 +589,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 			pr_err("%s: Page request without PASID: %08llx %08llx\n",
 			       iommu->name, ((unsigned long long *)req)[0],
 			       ((unsigned long long *)req)[1]);
-			goto bad_req;
+			goto no_pasid;
 		}
 
 		if (!svm || svm->pasid != req->pasid) {

@@ -424,6 +424,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 
 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
 {
+	if (!domain->mmu)
+		return;
+
 	/*
 	 * Disable the context. Flush the TLB as required when modifying the
 	 * context registers.

@@ -1592,6 +1592,8 @@ static void isp_pm_complete(struct device *dev)
 
 static void isp_unregister_entities(struct isp_device *isp)
 {
+	media_device_unregister(&isp->media_dev);
+
 	omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
 	omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
 	omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);

@@ -1602,7 +1604,6 @@ static void isp_unregister_entities(struct isp_device *isp)
 	omap3isp_stat_unregister_entities(&isp->isp_hist);
 
 	v4l2_device_unregister(&isp->v4l2_dev);
-	media_device_unregister(&isp->media_dev);
 	media_device_cleanup(&isp->media_dev);
 }

@@ -149,15 +149,15 @@
|
||||
#define NAND_VERSION_MINOR_SHIFT 16
|
||||
|
||||
/* NAND OP_CMDs */
|
||||
#define PAGE_READ 0x2
|
||||
#define PAGE_READ_WITH_ECC 0x3
|
||||
#define PAGE_READ_WITH_ECC_SPARE 0x4
|
||||
#define PROGRAM_PAGE 0x6
|
||||
#define PAGE_PROGRAM_WITH_ECC 0x7
|
||||
#define PROGRAM_PAGE_SPARE 0x9
|
||||
#define BLOCK_ERASE 0xa
|
||||
#define FETCH_ID 0xb
|
||||
#define RESET_DEVICE 0xd
|
||||
#define OP_PAGE_READ 0x2
|
||||
#define OP_PAGE_READ_WITH_ECC 0x3
|
||||
#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
|
||||
#define OP_PROGRAM_PAGE 0x6
|
||||
#define OP_PAGE_PROGRAM_WITH_ECC 0x7
|
||||
#define OP_PROGRAM_PAGE_SPARE 0x9
|
||||
#define OP_BLOCK_ERASE 0xa
|
||||
#define OP_FETCH_ID 0xb
|
||||
#define OP_RESET_DEVICE 0xd
|
||||
|
||||
/* Default Value for NAND_DEV_CMD_VLD */
|
||||
#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
|
||||
@@ -629,11 +629,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
|
||||
|
||||
if (read) {
|
||||
if (host->use_ecc)
|
||||
cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
|
||||
cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
|
||||
else
|
||||
cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
|
||||
cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
|
||||
} else {
|
||||
cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
|
||||
cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
|
||||
}
|
||||
|
||||
if (host->use_ecc) {
|
||||
@@ -1030,7 +1030,7 @@ static int nandc_param(struct qcom_nand_host *host)
|
||||
* in use. we configure the controller to perform a raw read of 512
|
||||
* bytes to read onfi params
|
||||
*/
|
||||
nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
|
||||
nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
|
||||
nandc_set_reg(nandc, NAND_ADDR0, 0);
|
||||
nandc_set_reg(nandc, NAND_ADDR1, 0);
|
||||
nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
|
||||
@@ -1084,7 +1084,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
|
||||
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
|
||||
|
||||
nandc_set_reg(nandc, NAND_FLASH_CMD,
|
||||
BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
|
||||
OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
|
||||
nandc_set_reg(nandc, NAND_ADDR0, page_addr);
|
||||
nandc_set_reg(nandc, NAND_ADDR1, 0);
|
||||
nandc_set_reg(nandc, NAND_DEV0_CFG0,
|
||||
@@ -1115,7 +1115,7 @@ static int read_id(struct qcom_nand_host *host, int column)
|
||||
if (column == -1)
|
||||
return 0;
|
||||
|
||||
nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
|
||||
nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
|
||||
nandc_set_reg(nandc, NAND_ADDR0, column);
|
||||
nandc_set_reg(nandc, NAND_ADDR1, 0);
|
||||
nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
|
||||
@@ -1136,7 +1136,7 @@ static int reset(struct qcom_nand_host *host)
|
||||
struct nand_chip *chip = &host->chip;
|
||||
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
|
||||
|
||||
nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
|
||||
nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
|
||||
nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
|
||||
|
||||
write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
|
||||
|
||||
@@ -625,9 +625,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
|
||||
reg_base + CQSPI_REG_INDIRECTWR);
|
||||
|
||||
while (remaining > 0) {
|
||||
size_t write_words, mod_bytes;
|
||||
|
||||
write_bytes = remaining > page_size ? page_size : remaining;
|
||||
iowrite32_rep(cqspi->ahb_base, txbuf,
|
||||
DIV_ROUND_UP(write_bytes, 4));
|
||||
write_words = write_bytes / 4;
|
||||
mod_bytes = write_bytes % 4;
|
||||
/* Write 4 bytes at a time then single bytes. */
|
||||
if (write_words) {
|
||||
iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
|
||||
txbuf += (write_words * 4);
|
||||
}
|
||||
if (mod_bytes) {
|
||||
unsigned int temp = 0xFFFFFFFF;
|
||||
|
||||
memcpy(&temp, txbuf, mod_bytes);
|
||||
iowrite32(temp, cqspi->ahb_base);
|
||||
txbuf += mod_bytes;
|
||||
}
|
||||
|
||||
ret = wait_for_completion_timeout(&cqspi->transfer_complete,
|
||||
msecs_to_jiffies
|
||||
@@ -638,7 +652,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
|
||||
goto failwr;
|
||||
}
|
||||
|
||||
txbuf += write_bytes;
|
||||
remaining -= write_bytes;
|
||||
|
||||
if (remaining > 0)
|
||||
|
||||
@@ -24,6 +24,9 @@
|
||||
|
||||
#define RCAR_CAN_DRV_NAME "rcar_can"
|
||||
|
||||
#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
|
||||
BIT(CLKR_CLKEXT))
|
||||
|
||||
/* Mailbox configuration:
|
||||
* mailbox 60 - 63 - Rx FIFO mailboxes
|
||||
* mailbox 56 - 59 - Tx FIFO mailboxes
|
||||
@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
|
||||
goto fail_clk;
|
||||
}
|
||||
|
||||
if (clock_select >= ARRAY_SIZE(clock_names)) {
|
||||
if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
|
||||
err = -EINVAL;
|
||||
dev_err(&pdev->dev, "invalid CAN clock selected\n");
|
||||
goto fail_clk;
|
||||
|
||||
@@ -1418,7 +1418,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
 
 		prop = of_get_property(nd, "tpe-link-test?", NULL);
 		if (!prop)
-			goto no_link_test;
+			goto node_put;
 
 		if (strcmp(prop, "true")) {
 			printk(KERN_NOTICE "SunLance: warning: overriding option "
@@ -1427,6 +1427,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
 			       "to ecd@skynet.be\n");
 			auxio_set_lte(AUXIO_LTE_ON);
 		}
+node_put:
+		of_node_put(nd);
 no_link_test:
 		lp->auto_select = 1;
 		lp->tpe = 0;

@@ -2187,6 +2187,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
|
||||
#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
|
||||
E1HVN_MAX)
|
||||
|
||||
/* Following is the DMAE channel number allocation for the clients.
|
||||
* MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
|
||||
* Driver: 0-3 and 8-11 (for PF dmae operations)
|
||||
* 4 and 12 (for stats requests)
|
||||
*/
|
||||
#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */
|
||||
|
||||
/* PCIE link and speed */
|
||||
#define PCICFG_LINK_WIDTH 0x1f00000
|
||||
#define PCICFG_LINK_WIDTH_SHIFT 20
|
||||
|
||||
@@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
|
||||
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
|
||||
rdata->path_id = BP_PATH(bp);
|
||||
rdata->network_cos_mode = start_params->network_cos_mode;
|
||||
rdata->dmae_cmd_id = BNX2X_FW_DMAE_C;
|
||||
|
||||
rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
|
||||
rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
|
||||
|
||||
@@ -870,11 +870,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
 	struct net_device *netdev = dev_id;
 	struct ftmac100 *priv = netdev_priv(netdev);
 
-	if (likely(netif_running(netdev))) {
-		/* Disable interrupts for polling */
-		ftmac100_disable_all_int(priv);
+	/* Disable interrupts for polling */
+	ftmac100_disable_all_int(priv);
+	if (likely(netif_running(netdev)))
 		napi_schedule(&priv->napi);
-	}
 
 	return IRQ_HANDLED;
 }

@@ -457,8 +457,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 
 		for (j = 0; j < rx_pool->size; j++) {
 			if (rx_pool->rx_buff[j].skb) {
-				dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
-				rx_pool->rx_buff[i].skb = NULL;
+				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+				rx_pool->rx_buff[j].skb = NULL;
 			}
 		}
 
@@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
 static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
 				  int align, u32 skip_mask, u32 *puid)
 {
-	u32 uid;
+	u32 uid = 0;
 	u32 res;
 	struct mlx4_zone_allocator *zone_alloc = zone->allocator;
 	struct mlx4_zone_entry *curr_node;

@@ -541,8 +541,8 @@ struct slave_list {
|
||||
struct resource_allocator {
|
||||
spinlock_t alloc_lock; /* protect quotas */
|
||||
union {
|
||||
int res_reserved;
|
||||
int res_port_rsvd[MLX4_MAX_PORTS];
|
||||
unsigned int res_reserved;
|
||||
unsigned int res_port_rsvd[MLX4_MAX_PORTS];
|
||||
};
|
||||
union {
|
||||
int res_free;
|
||||
|
||||
@@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
|
||||
container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
|
||||
buf);
|
||||
|
||||
(*mpt_entry)->lkey = 0;
|
||||
err = mlx4_SW2HW_MPT(dev, mailbox, key);
|
||||
}
|
||||
|
||||
|
||||
@@ -440,8 +440,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
|
||||
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
|
||||
|
||||
/* Can't have multiple flags set here */
|
||||
if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
|
||||
if (bitmap_weight((unsigned long *)&pq_flags,
|
||||
sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
|
||||
DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
|
||||
DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
|
||||
goto err;
|
||||
}
|
||||
|
||||
switch (pq_flags) {
|
||||
case PQ_FLAGS_RLS:
|
||||
@@ -465,8 +473,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
|
||||
}
|
||||
|
||||
err:
|
||||
DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
|
||||
return NULL;
|
||||
return &qm_info->start_pq;
|
||||
}
|
||||
|
||||
/* save pq index in qm info */
|
||||
@@ -490,20 +497,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
|
||||
{
|
||||
u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
|
||||
|
||||
if (max_tc == 0) {
|
||||
DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
|
||||
PQ_FLAGS_MCOS);
|
||||
return p_hwfn->qm_info.start_pq;
|
||||
}
|
||||
|
||||
if (tc > max_tc)
|
||||
DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
|
||||
|
||||
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
|
||||
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
|
||||
}
|
||||
|
||||
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
|
||||
{
|
||||
u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
|
||||
|
||||
if (max_vf == 0) {
|
||||
DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
|
||||
PQ_FLAGS_VFS);
|
||||
return p_hwfn->qm_info.start_pq;
|
||||
}
|
||||
|
||||
if (vf > max_vf)
|
||||
DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
|
||||
|
||||
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
|
||||
return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
|
||||
}
|
||||
|
||||
u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
|
||||
|
||||
@@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
|
||||
*/
|
||||
do {
|
||||
index = p_sb_attn->sb_index;
|
||||
/* finish reading index before the loop condition */
|
||||
dma_rmb();
|
||||
attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
|
||||
attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
|
||||
} while (index != p_sb_attn->sb_index);
|
||||
|
||||
@@ -1561,9 +1561,9 @@ static int qed_drain(struct qed_dev *cdev)
 			return -EBUSY;
 		}
 		rc = qed_mcp_drain(hwfn, ptt);
+		qed_ptt_release(hwfn, ptt);
 		if (rc)
 			return rc;
-		qed_ptt_release(hwfn, ptt);
 	}
 
 	return 0;

@@ -989,8 +989,6 @@ static void team_port_disable(struct team *team,
 	team->en_port_count--;
 	team_queue_override_port_del(team, port);
 	team_adjust_ops(team);
-	team_notify_peers(team);
-	team_mcast_rejoin(team);
 	team_lower_state_changed(port);
 }

@@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
|
||||
}
|
||||
break;
|
||||
case BRCMU_CHSPEC_D11AC_BW_160:
|
||||
ch->bw = BRCMU_CHAN_BW_160;
|
||||
ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
|
||||
BRCMU_CHSPEC_D11AC_SB_SHIFT);
|
||||
switch (ch->sb) {
|
||||
case BRCMU_CHAN_SB_LLL:
|
||||
ch->control_ch_num -= CH_70MHZ_APART;
|
||||
|
||||
@@ -2698,6 +2698,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
|
||||
|
||||
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
|
||||
|
||||
tasklet_hrtimer_init(&data->beacon_timer,
|
||||
mac80211_hwsim_beacon,
|
||||
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
|
||||
|
||||
err = ieee80211_register_hw(hw);
|
||||
if (err < 0) {
|
||||
printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
|
||||
@@ -2722,10 +2726,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
|
||||
data->debugfs,
|
||||
data, &hwsim_simulate_radar);
|
||||
|
||||
tasklet_hrtimer_init(&data->beacon_timer,
|
||||
mac80211_hwsim_beacon,
|
||||
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
|
||||
|
||||
spin_lock_bh(&hwsim_radio_lock);
|
||||
list_add_tail(&data->list, &hwsim_radios);
|
||||
spin_unlock_bh(&hwsim_radio_lock);
|
||||
|
||||
@@ -105,6 +105,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
|
||||
struct nd_mapping *nd_mapping, resource_size_t *overlap);
|
||||
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
|
||||
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
|
||||
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
|
||||
resource_size_t size);
|
||||
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
|
||||
struct nd_label_id *label_id);
|
||||
int alias_dpa_busy(struct device *dev, void *data);
|
||||
|
||||
@@ -589,14 +589,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
|
||||
ALIGN_DOWN(phys, nd_pfn->align));
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if pmem collides with 'System RAM', or other regions when
|
||||
* section aligned. Trim it accordingly.
|
||||
*/
|
||||
static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
|
||||
{
|
||||
struct nd_namespace_common *ndns = nd_pfn->ndns;
|
||||
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
|
||||
struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
|
||||
const resource_size_t start = nsio->res.start;
|
||||
const resource_size_t end = start + resource_size(&nsio->res);
|
||||
resource_size_t adjust, size;
|
||||
|
||||
*start_pad = 0;
|
||||
*end_trunc = 0;
|
||||
|
||||
adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
|
||||
size = resource_size(&nsio->res) + adjust;
|
||||
if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
|
||||
IORES_DESC_NONE) == REGION_MIXED
|
||||
|| nd_region_conflict(nd_region, start - adjust, size))
|
||||
*start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
|
||||
|
||||
/* Now check that end of the range does not collide. */
|
||||
adjust = PHYS_SECTION_ALIGN_UP(end) - end;
|
||||
size = resource_size(&nsio->res) + adjust;
|
||||
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
|
||||
IORES_DESC_NONE) == REGION_MIXED
|
||||
|| !IS_ALIGNED(end, nd_pfn->align)
|
||||
|| nd_region_conflict(nd_region, start, size + adjust))
|
||||
*end_trunc = end - phys_pmem_align_down(nd_pfn, end);
|
||||
}
|
||||
|
||||
static int nd_pfn_init(struct nd_pfn *nd_pfn)
|
||||
{
|
||||
u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
|
||||
struct nd_namespace_common *ndns = nd_pfn->ndns;
|
||||
u32 start_pad = 0, end_trunc = 0;
|
||||
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
|
||||
resource_size_t start, size;
|
||||
struct nd_namespace_io *nsio;
|
||||
struct nd_region *nd_region;
|
||||
u32 start_pad, end_trunc;
|
||||
struct nd_pfn_sb *pfn_sb;
|
||||
unsigned long npfns;
|
||||
phys_addr_t offset;
|
||||
@@ -628,30 +661,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
|
||||
|
||||
memset(pfn_sb, 0, sizeof(*pfn_sb));
|
||||
|
||||
/*
|
||||
* Check if pmem collides with 'System RAM' when section aligned and
|
||||
* trim it accordingly
|
||||
*/
|
||||
nsio = to_nd_namespace_io(&ndns->dev);
|
||||
start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
|
||||
size = resource_size(&nsio->res);
|
||||
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
|
||||
IORES_DESC_NONE) == REGION_MIXED) {
|
||||
start = nsio->res.start;
|
||||
start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
|
||||
}
|
||||
|
||||
start = nsio->res.start;
|
||||
size = PHYS_SECTION_ALIGN_UP(start + size) - start;
|
||||
if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
|
||||
IORES_DESC_NONE) == REGION_MIXED
|
||||
|| !IS_ALIGNED(start + resource_size(&nsio->res),
|
||||
nd_pfn->align)) {
|
||||
size = resource_size(&nsio->res);
|
||||
end_trunc = start + size - phys_pmem_align_down(nd_pfn,
|
||||
start + size);
|
||||
}
|
||||
|
||||
trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
|
||||
if (start_pad + end_trunc)
|
||||
dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
|
||||
dev_name(&ndns->dev), start_pad + end_trunc);
|
||||
@@ -662,7 +672,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
|
||||
* implementation will limit the pfns advertised through
|
||||
* ->direct_access() to those that are included in the memmap.
|
||||
*/
|
||||
start += start_pad;
|
||||
start = nsio->res.start + start_pad;
|
||||
size = resource_size(&nsio->res);
|
||||
npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
|
||||
/ PAGE_SIZE);
|
||||
|
||||
@@ -1112,6 +1112,47 @@ int nvdimm_has_cache(struct nd_region *nd_region)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvdimm_has_cache);
|
||||
|
||||
struct conflict_context {
|
||||
struct nd_region *nd_region;
|
||||
resource_size_t start, size;
|
||||
};
|
||||
|
||||
static int region_conflict(struct device *dev, void *data)
|
||||
{
|
||||
struct nd_region *nd_region;
|
||||
struct conflict_context *ctx = data;
|
||||
resource_size_t res_end, region_end, region_start;
|
||||
|
||||
if (!is_memory(dev))
|
||||
return 0;
|
||||
|
||||
nd_region = to_nd_region(dev);
|
||||
if (nd_region == ctx->nd_region)
|
||||
return 0;
|
||||
|
||||
res_end = ctx->start + ctx->size;
|
||||
region_start = nd_region->ndr_start;
|
||||
region_end = region_start + nd_region->ndr_size;
|
||||
if (ctx->start >= region_start && ctx->start < region_end)
|
||||
return -EBUSY;
|
||||
if (res_end > region_start && res_end <= region_end)
|
||||
return -EBUSY;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
|
||||
resource_size_t size)
|
||||
{
|
||||
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
|
||||
struct conflict_context ctx = {
|
||||
.nd_region = nd_region,
|
||||
.start = start,
|
||||
.size = size,
|
||||
};
|
||||
|
||||
return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
|
||||
}
|
||||
|
||||
void __exit nd_region_devs_exit(void)
|
||||
{
|
||||
ida_destroy(®ion_ida);
|
||||
|
||||
@@ -59,6 +59,7 @@ struct virtio_ccw_device {
|
||||
unsigned int revision; /* Transport revision */
|
||||
wait_queue_head_t wait_q;
|
||||
spinlock_t lock;
|
||||
struct mutex io_lock; /* Serializes I/O requests */
|
||||
struct list_head virtqueues;
|
||||
unsigned long indicators;
|
||||
unsigned long indicators2;
|
||||
@@ -299,6 +300,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
|
||||
unsigned long flags;
|
||||
int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
|
||||
|
||||
mutex_lock(&vcdev->io_lock);
|
||||
do {
|
||||
spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
|
||||
ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
|
||||
@@ -311,7 +313,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
|
||||
cpu_relax();
|
||||
} while (ret == -EBUSY);
|
||||
wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
|
||||
return ret ? ret : vcdev->err;
|
||||
ret = ret ? ret : vcdev->err;
|
||||
mutex_unlock(&vcdev->io_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
|
||||
@@ -831,6 +835,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
|
||||
int ret;
|
||||
struct ccw1 *ccw;
|
||||
void *config_area;
|
||||
unsigned long flags;
|
||||
|
||||
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
|
||||
if (!ccw)
|
||||
@@ -849,11 +854,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
spin_lock_irqsave(&vcdev->lock, flags);
|
||||
memcpy(vcdev->config, config_area, offset + len);
|
||||
if (buf)
|
||||
memcpy(buf, &vcdev->config[offset], len);
|
||||
if (vcdev->config_ready < offset + len)
|
||||
vcdev->config_ready = offset + len;
|
||||
spin_unlock_irqrestore(&vcdev->lock, flags);
|
||||
if (buf)
|
||||
memcpy(buf, config_area + offset, len);
|
||||
|
||||
out_free:
|
||||
kfree(config_area);
|
||||
@@ -867,6 +874,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
|
||||
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
|
||||
struct ccw1 *ccw;
|
||||
void *config_area;
|
||||
unsigned long flags;
|
||||
|
||||
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
|
||||
if (!ccw)
|
||||
@@ -879,9 +887,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
|
||||
/* Make sure we don't overwrite fields. */
|
||||
if (vcdev->config_ready < offset)
|
||||
virtio_ccw_get_config(vdev, 0, NULL, offset);
|
||||
spin_lock_irqsave(&vcdev->lock, flags);
|
||||
memcpy(&vcdev->config[offset], buf, len);
|
||||
/* Write the config area to the host. */
|
||||
memcpy(config_area, vcdev->config, sizeof(vcdev->config));
|
||||
spin_unlock_irqrestore(&vcdev->lock, flags);
|
||||
ccw->cmd_code = CCW_CMD_WRITE_CONF;
|
||||
ccw->flags = 0;
|
||||
ccw->count = offset + len;
|
||||
@@ -1250,6 +1260,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
|
||||
init_waitqueue_head(&vcdev->wait_q);
|
||||
INIT_LIST_HEAD(&vcdev->virtqueues);
|
||||
spin_lock_init(&vcdev->lock);
|
||||
mutex_init(&vcdev->io_lock);
|
||||
|
||||
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
|
||||
dev_set_drvdata(&cdev->dev, vcdev);
|
||||
|
||||
@@ -354,8 +354,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
 				CERROR("Can't allocate net interface name\n");
 				goto failed;
 			}
-			strncpy(ni->ni_interfaces[niface], iface,
-				strlen(iface));
+			strcpy(ni->ni_interfaces[niface], iface);
 			niface++;
 			iface = comma;
 		} while (iface);

@@ -645,7 +645,7 @@ repeat_fid2path:
|
||||
memmove(ptr + strlen(gf->gf_path) + 1, ptr,
|
||||
strlen(ori_gf->gf_path));
|
||||
|
||||
strncpy(ptr, gf->gf_path, strlen(gf->gf_path));
|
||||
strcpy(ptr, gf->gf_path);
|
||||
ptr += strlen(gf->gf_path);
|
||||
*ptr = '/';
|
||||
}
|
||||
|
||||
@@ -2860,9 +2860,7 @@ ia_css_debug_pipe_graph_dump_stage(
|
||||
if (l <= ENABLE_LINE_MAX_LENGTH) {
|
||||
/* It fits on one line, copy string and init */
|
||||
/* other helper strings with empty string */
|
||||
strcpy_s(enable_info,
|
||||
sizeof(enable_info),
|
||||
ei);
|
||||
strscpy(enable_info, ei, sizeof(enable_info));
|
||||
} else {
|
||||
/* Too big for one line, find last comma */
|
||||
p = ENABLE_LINE_MAX_LENGTH;
|
||||
|
||||
@@ -158,7 +158,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
 		p = buff;
 		p += sprintf(p, "ASSOCINFO(ReqIEs=");
 		len = sec_ie[1] + 2;
-		len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX - 1;
+		len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
 		for (i = 0; i < len; i++)
 			p += sprintf(p, "%02x", sec_ie[i]);
 		p += sprintf(p, ")");

@@ -1361,7 +1361,7 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
|
||||
u8 *out_ie, uint in_len)
|
||||
{
|
||||
u8 authmode = 0, match;
|
||||
u8 sec_ie[255], uncst_oui[4], bkup_ie[255];
|
||||
u8 sec_ie[IW_CUSTOM_MAX], uncst_oui[4], bkup_ie[255];
|
||||
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
|
||||
uint ielength, cnt, remove_cnt;
|
||||
int iEntry;
|
||||
|
||||
@@ -1574,7 +1574,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
|
||||
if (pstat->aid > 0) {
|
||||
DBG_871X(" old AID %d\n", pstat->aid);
|
||||
} else {
|
||||
for (pstat->aid = 1; pstat->aid < NUM_STA; pstat->aid++)
|
||||
for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
|
||||
if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
|
||||
break;
|
||||
|
||||
|
||||
@@ -222,17 +222,17 @@ static int mtk8250_probe(struct platform_device *pdev)
|
||||
|
||||
platform_set_drvdata(pdev, data);
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
if (!pm_runtime_enabled(&pdev->dev)) {
|
||||
err = mtk8250_runtime_resume(&pdev->dev);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
err = mtk8250_runtime_resume(&pdev->dev);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
data->line = serial8250_register_8250_port(&uart);
|
||||
if (data->line < 0)
|
||||
return data->line;
|
||||
|
||||
pm_runtime_set_active(&pdev->dev);
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -243,13 +243,11 @@ static int mtk8250_remove(struct platform_device *pdev)
|
||||
pm_runtime_get_sync(&pdev->dev);
|
||||
|
||||
serial8250_unregister_port(data->line);
|
||||
mtk8250_runtime_suspend(&pdev->dev);
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
pm_runtime_put_noidle(&pdev->dev);
|
||||
|
||||
if (!pm_runtime_status_suspended(&pdev->dev))
|
||||
mtk8250_runtime_suspend(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -233,7 +233,7 @@ static void kgdboc_put_char(u8 chr)
 static int param_set_kgdboc_var(const char *kmessage,
 				const struct kernel_param *kp)
 {
-	int len = strlen(kmessage);
+	size_t len = strlen(kmessage);
 
 	if (len >= MAX_CONFIG_LEN) {
 		printk(KERN_ERR "kgdboc: config string too long\n");
@@ -255,7 +255,7 @@ static int param_set_kgdboc_var(const char *kmessage,
 
 	strcpy(config, kmessage);
 	/* Chop out \n char as a result of echo */
-	if (config[len - 1] == '\n')
+	if (len && config[len - 1] == '\n')
 		config[len - 1] = '\0';
 
 	if (configured == 1)

@@ -639,7 +639,8 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty,
 	if (tty_port_close_start(port, tty, filp) == 0)
 		return;
 	tty_port_shutdown(port, tty);
-	set_bit(TTY_IO_ERROR, &tty->flags);
+	if (!port->console)
+		set_bit(TTY_IO_ERROR, &tty->flags);
 	tty_port_close_end(port, tty);
 	tty_port_tty_set(port, NULL);
 }

@@ -2231,7 +2231,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
|
||||
/* descriptor may appear anywhere in config */
|
||||
err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
|
||||
le16_to_cpu(udev->config[0].desc.wTotalLength),
|
||||
USB_DT_OTG, (void **) &desc);
|
||||
USB_DT_OTG, (void **) &desc, sizeof(*desc));
|
||||
if (err || !(desc->bmAttributes & USB_OTG_HNP))
|
||||
return 0;
|
||||
|
||||
|
||||
@@ -188,6 +188,10 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Midiman M-Audio Keystation 88es */
 	{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* SanDisk Ultra Fit and Ultra Flair */
+	{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
+	{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+
 	/* M-Systems Flash Disk Pioneers */
 	{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },

@@ -833,14 +833,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
|
||||
*/
|
||||
|
||||
int __usb_get_extra_descriptor(char *buffer, unsigned size,
|
||||
unsigned char type, void **ptr)
|
||||
unsigned char type, void **ptr, size_t minsize)
|
||||
{
|
||||
struct usb_descriptor_header *header;
|
||||
|
||||
while (size >= sizeof(struct usb_descriptor_header)) {
|
||||
header = (struct usb_descriptor_header *)buffer;
|
||||
|
||||
if (header->bLength < 2) {
|
||||
if (header->bLength < 2 || header->bLength > size) {
|
||||
printk(KERN_ERR
|
||||
"%s: bogus descriptor, type %d length %d\n",
|
||||
usbcore_name,
|
||||
@@ -849,7 +849,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (header->bDescriptorType == type) {
|
||||
if (header->bDescriptorType == type && header->bLength >= minsize) {
|
||||
*ptr = header;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -219,7 +219,6 @@ struct ffs_io_data {
|
||||
|
||||
struct mm_struct *mm;
|
||||
struct work_struct work;
|
||||
struct work_struct cancellation_work;
|
||||
|
||||
struct usb_ep *ep;
|
||||
struct usb_request *req;
|
||||
@@ -1074,31 +1073,22 @@ ffs_epfile_open(struct inode *inode, struct file *file)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ffs_aio_cancel_worker(struct work_struct *work)
|
||||
{
|
||||
struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
|
||||
cancellation_work);
|
||||
|
||||
ENTER();
|
||||
|
||||
usb_ep_dequeue(io_data->ep, io_data->req);
|
||||
}
|
||||
|
||||
static int ffs_aio_cancel(struct kiocb *kiocb)
|
||||
{
|
||||
struct ffs_io_data *io_data = kiocb->private;
|
||||
struct ffs_data *ffs = io_data->ffs;
|
||||
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
|
||||
int value;
|
||||
|
||||
ENTER();
|
||||
|
||||
if (likely(io_data && io_data->ep && io_data->req)) {
|
||||
INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
|
||||
queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
|
||||
value = -EINPROGRESS;
|
||||
} else {
|
||||
spin_lock_irq(&epfile->ffs->eps_lock);
|
||||
|
||||
if (likely(io_data && io_data->ep && io_data->req))
|
||||
value = usb_ep_dequeue(io_data->ep, io_data->req);
|
||||
else
|
||||
value = -EINVAL;
|
||||
}
|
||||
|
||||
spin_unlock_irq(&epfile->ffs->eps_lock);
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
@@ -654,7 +654,7 @@ static int hwahc_security_create(struct hwahc *hwahc)
|
||||
top = itr + itr_size;
|
||||
result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
|
||||
le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
|
||||
USB_DT_SECURITY, (void **) &secd);
|
||||
USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
|
||||
if (result == -1) {
|
||||
dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
|
||||
return 0;
|
||||
|
||||
@@ -144,6 +144,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
|
||||
pdev->device == 0x43bb))
|
||||
xhci->quirks |= XHCI_SUSPEND_DELAY;
|
||||
|
||||
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
|
||||
(pdev->device == 0x15e0 || pdev->device == 0x15e1))
|
||||
xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
|
||||
|
||||
if (pdev->vendor == PCI_VENDOR_ID_AMD)
|
||||
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
|
||||
|
||||
|
||||
@@ -918,6 +918,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
|
||||
unsigned int delay = XHCI_MAX_HALT_USEC;
|
||||
struct usb_hcd *hcd = xhci_to_hcd(xhci);
|
||||
u32 command;
|
||||
u32 res;
|
||||
|
||||
if (!hcd->state)
|
||||
return 0;
|
||||
@@ -969,11 +970,28 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
|
||||
command = readl(&xhci->op_regs->command);
|
||||
command |= CMD_CSS;
|
||||
writel(command, &xhci->op_regs->command);
|
||||
xhci->broken_suspend = 0;
|
||||
if (xhci_handshake(&xhci->op_regs->status,
|
||||
STS_SAVE, 0, 10 * 1000)) {
|
||||
xhci_warn(xhci, "WARN: xHC save state timeout\n");
|
||||
spin_unlock_irq(&xhci->lock);
|
||||
return -ETIMEDOUT;
|
||||
/*
|
||||
* AMD SNPS xHC 3.0 occasionally does not clear the
|
||||
* SSS bit of USBSTS and when driver tries to poll
|
||||
* to see if the xHC clears BIT(8) which never happens
|
||||
* and driver assumes that controller is not responding
|
||||
* and times out. To workaround this, its good to check
|
||||
* if SRE and HCE bits are not set (as per xhci
|
||||
* Section 5.4.2) and bypass the timeout.
|
||||
*/
|
||||
res = readl(&xhci->op_regs->status);
|
||||
if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
|
||||
(((res & STS_SRE) == 0) &&
|
||||
((res & STS_HCE) == 0))) {
|
||||
xhci->broken_suspend = 1;
|
||||
} else {
|
||||
xhci_warn(xhci, "WARN: xHC save state timeout\n");
|
||||
spin_unlock_irq(&xhci->lock);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&xhci->lock);
|
||||
|
||||
@@ -1026,7 +1044,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
|
||||
set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
|
||||
|
||||
spin_lock_irq(&xhci->lock);
|
||||
if (xhci->quirks & XHCI_RESET_ON_RESUME)
|
||||
if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
|
||||
hibernated = true;
|
||||
|
||||
if (!hibernated) {
|
||||
@@ -4363,6 +4381,14 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;

/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}

if (xhci->quirks & XHCI_INTEL_HOST)
timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
else
@@ -4419,6 +4445,14 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;

/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}

if (xhci->quirks & XHCI_INTEL_HOST)
timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
else
@@ -1839,6 +1839,7 @@ struct xhci_hcd {
#define XHCI_SUSPEND_DELAY BIT_ULL(30)
#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)

unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -1870,6 +1871,8 @@ struct xhci_hcd {

/* platform-specific data -- must come last */
unsigned long priv[0] __aligned(sizeof(s64));
/* Broken Suspend flag for SNPS Suspend resume issue */
u8 broken_suspend;
};

/* Platform specific overrides to generic XHCI hc_driver ops */
@@ -64,6 +64,7 @@ static const struct usb_device_id appledisplay_table[] = {
{ APPLEDISPLAY_DEVICE(0x921c) },
{ APPLEDISPLAY_DEVICE(0x921d) },
{ APPLEDISPLAY_DEVICE(0x9222) },
{ APPLEDISPLAY_DEVICE(0x9226) },
{ APPLEDISPLAY_DEVICE(0x9236) },

/* Terminating entry */
@@ -15,6 +15,7 @@
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"
@@ -27,14 +28,14 @@ enum {

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
struct vhost_dev dev;
struct vhost_virtqueue vqs[2];

/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
struct list_head list;
/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
struct hlist_node hash;

struct vhost_work send_pkt_work;
spinlock_t send_pkt_list_lock;
@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
return VHOST_VSOCK_DEFAULT_HOST_CID;
}

static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
/* Callers that dereference the return value must hold vhost_vsock_lock or the
* RCU read lock.
*/
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;

list_for_each_entry(vsock, &vhost_vsock_list, list) {
hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
u32 other_cid = vsock->guest_cid;

/* Skip instances that have no CID yet */
@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;

spin_lock_bh(&vhost_vsock_lock);
vsock = __vhost_vsock_get(guest_cid);
spin_unlock_bh(&vhost_vsock_lock);

return vsock;
}

static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
struct vhost_vsock *vsock;
int len = pkt->len;

rcu_read_lock();

/* Find the vhost_vsock according to guest context id */
vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
if (!vsock) {
rcu_read_unlock();
virtio_transport_free_pkt(pkt);
return -ENODEV;
}
@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
spin_unlock_bh(&vsock->send_pkt_list_lock);

vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

rcu_read_unlock();
return len;
}

@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
struct vhost_vsock *vsock;
struct virtio_vsock_pkt *pkt, *n;
int cnt = 0;
int ret = -ENODEV;
LIST_HEAD(freeme);

rcu_read_lock();

/* Find the vhost_vsock according to guest context id */
vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
if (!vsock)
return -ENODEV;
goto out;

spin_lock_bh(&vsock->send_pkt_list_lock);
list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
vhost_poll_queue(&tx_vq->poll);
}

return 0;
ret = 0;
out:
rcu_read_unlock();
return ret;
}

static struct virtio_vsock_pkt *
@@ -531,10 +535,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
spin_lock_init(&vsock->send_pkt_list_lock);
INIT_LIST_HEAD(&vsock->send_pkt_list);
vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

spin_lock_bh(&vhost_vsock_lock);
list_add_tail(&vsock->list, &vhost_vsock_list);
spin_unlock_bh(&vhost_vsock_lock);
return 0;

out:
@@ -575,9 +575,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
struct vhost_vsock *vsock = file->private_data;

spin_lock_bh(&vhost_vsock_lock);
list_del(&vsock->list);
if (vsock->guest_cid)
hash_del_rcu(&vsock->hash);
spin_unlock_bh(&vhost_vsock_lock);

/* Wait for other CPUs to finish using vsock */
synchronize_rcu();

/* Iterating over all connections for all CIDs to find orphans is
* inefficient. Room for improvement here. */
vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
@@ -618,12 +622,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)

/* Refuse if CID is already in use */
spin_lock_bh(&vhost_vsock_lock);
other = __vhost_vsock_get(guest_cid);
other = vhost_vsock_get(guest_cid);
if (other && other != vsock) {
spin_unlock_bh(&vhost_vsock_lock);
return -EADDRINUSE;
}

if (vsock->guest_cid)
hash_del_rcu(&vsock->hash);

vsock->guest_cid = guest_cid;
hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
spin_unlock_bh(&vhost_vsock_lock);

return 0;
@@ -174,7 +174,7 @@ cifs_bp_rename_retry:

cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
full_path[dfsplen] = '\\';
full_path[dfsplen] = dirsep;
for (i = 0; i < pplen-1; i++)
if (full_path[dfsplen+1+i] == '/')
full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
@@ -545,6 +545,9 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
struct bio_post_read_ctx *ctx;
unsigned int post_read_steps = 0;

if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
return ERR_PTR(-EFAULT);

bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
if (!bio)
return ERR_PTR(-ENOMEM);
@@ -68,14 +68,16 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
}
}

static bool __written_first_block(struct f2fs_sb_info *sbi,
static int __written_first_block(struct f2fs_sb_info *sbi,
struct f2fs_inode *ri)
{
block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

if (is_valid_data_blkaddr(sbi, addr))
return true;
return false;
if (!__is_valid_data_blkaddr(addr))
return 1;
if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
return -EFAULT;
return 0;
}

static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -259,6 +261,7 @@ static int do_read_inode(struct inode *inode)
struct page *node_page;
struct f2fs_inode *ri;
projid_t i_projid;
int err;

/* Check if ino is within scope */
if (check_nid_range(sbi, inode->i_ino))
@@ -313,7 +316,12 @@ static int do_read_inode(struct inode *inode)
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);

if (__written_first_block(sbi, ri))
err = __written_first_block(sbi, ri);
if (err < 0) {
f2fs_put_page(node_page, 1);
return err;
}
if (!err)
set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

if (!need_inode_block_update(sbi, inode->i_ino))
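The __written_first_block() change above turns a bool into a tri-state int. A minimal, self-contained userspace sketch of that convention; the helper and its arguments are illustrative stand-ins, not the kernel code:

#include <stdio.h>

/* Tri-state convention mirrored from the hunk above:
 * < 0  error (abort reading the inode),
 *   0  first block is a valid, written address,
 * > 0  no data block yet, nothing to flag.
 */
static int first_block_state(int has_block, int addr_in_main_area)
{
	if (!has_block)
		return 1;
	if (!addr_in_main_area)
		return -14;	/* -EFAULT in the kernel code */
	return 0;
}

int main(void)
{
	int err = first_block_state(1, 1);

	if (err < 0)
		printf("abort: %d\n", err);
	else if (!err)
		printf("set FI_FIRST_BLOCK_WRITTEN\n");
	else
		printf("skip\n");
	return 0;
}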
@@ -1365,12 +1365,7 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
task))
return;

if (ff_layout_read_prepare_common(task, hdr))
return;

if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
hdr->args.lock_context, FMODE_READ) == -EIO)
rpc_exit(task, -EIO); /* lost lock, terminate I/O */
ff_layout_read_prepare_common(task, hdr);
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
@@ -1539,12 +1534,7 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
task))
return;

if (ff_layout_write_prepare_common(task, hdr))
return;

if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
hdr->args.lock_context, FMODE_WRITE) == -EIO)
rpc_exit(task, -EIO); /* lost lock, terminate I/O */
ff_layout_write_prepare_common(task, hdr);
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
@@ -1734,6 +1724,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
if (fh)
hdr->args.fh = fh;

if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
goto out_failed;

/*
* Note that if we ever decide to split across DSes,
* then we may need to handle dense-like offsets.
@@ -1796,6 +1790,9 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
if (fh)
hdr->args.fh = fh;

if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
goto out_failed;

/*
* Note that if we ever decide to split across DSes,
* then we may need to handle dense-like offsets.
@@ -214,6 +214,10 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
unsigned int maxnum);
struct nfs_fh *
nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
int
nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
u32 mirror_idx,
nfs4_stateid *stateid);

struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,

@@ -369,6 +369,25 @@ out:
return fh;
}

int
nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
u32 mirror_idx,
nfs4_stateid *stateid)
{
struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);

if (!ff_layout_mirror_valid(lseg, mirror, false)) {
pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
__func__, mirror_idx);
goto out;
}

nfs4_stateid_copy(stateid, &mirror->stateid);
return 1;
out:
return 0;
}

/**
* nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
* @lseg: the layout segment we're operating on
@@ -869,6 +869,13 @@ struct vmbus_channel {

bool probe_done;

/*
* We must offload the handling of the primary/sub channels
* from the single-threaded vmbus_connection.work_queue to
* two different workqueue, otherwise we can block
* vmbus_connection.work_queue and hang: see vmbus_process_offer().
*/
struct work_struct add_channel_work;
};

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -407,11 +407,11 @@ struct usb_host_bos {
};

int __usb_get_extra_descriptor(char *buffer, unsigned size,
unsigned char type, void **ptr);
unsigned char type, void **ptr, size_t min);
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
__usb_get_extra_descriptor((ifpoint)->extra, \
(ifpoint)->extralen, \
type, (void **)ptr)
type, (void **)ptr, sizeof(**(ptr)))

/* ----------------------------------------------------------------------- */
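With the added minimum-size argument, usb_get_extra_descriptor() only hands back a descriptor that is at least as large as the type the caller dereferences. An illustrative, hypothetical caller (example_get_ss_cap, hdev and ss_cap are placeholder names, not part of this merge):

/* Sketch only: the macro now passes sizeof(*ss_cap), so a truncated
 * descriptor is rejected instead of being dereferenced by the caller.
 */
static int example_get_ss_cap(struct usb_device *hdev)
{
	struct usb_ss_cap_descriptor *ss_cap;

	if (usb_get_extra_descriptor(hdev->actconfig, USB_DT_SS_CAP, &ss_cap) < 0)
		return -ENODEV;	/* missing or shorter than sizeof(*ss_cap) */

	return !!(ss_cap->bmAttributes & USB_LTM_SUPPORT);
}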
@@ -247,11 +247,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
static inline int snd_interval_single(const struct snd_interval *i)
{
return (i->min == i->max ||
(i->min + 1 == i->max && i->openmax));
(i->min + 1 == i->max && (i->openmin || i->openmax)));
}

static inline int snd_interval_value(const struct snd_interval *i)
{
if (i->openmin && !i->openmax)
return i->max;
return i->min;
}
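The snd_interval_single() fix above makes an interval such as (43, 44], open at the minimum only, count as a single value, and snd_interval_value() then picks the max. A self-contained userspace sketch of the old versus fixed predicate:

#include <stdio.h>

struct interval { int min, max, openmin, openmax; };

/* Old predicate: ignores openmin, so (43, 44] is not seen as a single value. */
static int single_old(const struct interval *i)
{
	return i->min == i->max || (i->min + 1 == i->max && i->openmax);
}

/* Fixed predicate, matching the hunk above. */
static int single_new(const struct interval *i)
{
	return i->min == i->max ||
	       (i->min + 1 == i->max && (i->openmin || i->openmax));
}

int main(void)
{
	/* (43, 44] contains exactly one integer, 44. */
	struct interval i = { .min = 43, .max = 44, .openmin = 1, .openmax = 0 };

	printf("old: %d, fixed: %d\n", single_old(&i), single_new(&i));
	return 0;
}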
@@ -17,6 +17,8 @@
* 08/12/11 beckyb Add highmem support
*/

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
@@ -177,20 +179,16 @@ static bool no_iotlb_memory;
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
unsigned char *vstart, *vend;

if (no_iotlb_memory) {
pr_warn("software IO TLB: No low mem\n");
pr_warn("No low mem\n");
return;
}

vstart = phys_to_virt(io_tlb_start);
vend = phys_to_virt(io_tlb_end);

printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
pr_info("mapped [mem %#010llx-%#010llx] (%luMB)\n",
(unsigned long long)io_tlb_start,
(unsigned long long)io_tlb_end,
bytes >> 20, vstart, vend - 1);
bytes >> 20);
}

/*
@@ -290,7 +288,7 @@ swiotlb_init(int verbose)
if (io_tlb_start)
memblock_free_early(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
pr_warn("Cannot allocate SWIOTLB buffer");
pr_warn("Cannot allocate buffer");
no_iotlb_memory = true;
}

@@ -332,8 +330,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
return -ENOMEM;
}
if (order != get_order(bytes)) {
printk(KERN_WARNING "Warning: only able to allocate %ld MB "
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
pr_warn("only able to allocate %ld MB\n",
(PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
}
rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
@@ -770,7 +768,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,

err_warn:
if (warn && printk_ratelimit()) {
pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
pr_warn("coherent allocation failed for device %s size=%zu\n",
dev_name(hwdev), size);
dump_stack();
}
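The swiotlb cleanup above relies on the kernel's pr_fmt convention: defining pr_fmt() before the printing helpers prefixes every pr_*() message, so the individual call sites can drop the repeated "software IO TLB: " text. A minimal userspace sketch of that mechanism, with printf standing in for printk:

#include <stdio.h>

/* Userspace stand-in for the kernel's pr_fmt mechanism (illustrative only). */
#define pr_fmt(fmt) "software IO TLB: " fmt
#define pr_warn(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_warn("No low mem\n");	/* prints "software IO TLB: No low mem" */
	pr_warn("only able to allocate %ld MB\n", 32L);
	return 0;
}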
@@ -838,6 +838,7 @@ static ssize_t read_firmware_show(struct device *dev,
if (req->fw->size > PAGE_SIZE) {
pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
rc = -EINVAL;
goto out;
}
memcpy(buf, req->fw->data, req->fw->size);
@@ -3874,8 +3874,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
enum compact_result compact_result;
int compaction_retries;
int no_progress_loops;
unsigned long alloc_start = jiffies;
unsigned int stall_timeout = 10 * HZ;
unsigned int cpuset_mems_cookie;
int reserve_flags;

@@ -3995,14 +3993,6 @@ retry:
if (!can_direct_reclaim)
goto nopage;

/* Make sure we know about allocations which stall for too long */
if (time_after(jiffies, alloc_start + stall_timeout)) {
warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask,
"page allocation stalls for %ums, order:%u",
jiffies_to_msecs(jiffies-alloc_start), order);
stall_timeout += 10 * HZ;
}

/* Avoid recursion of direct reclaim */
if (current->flags & PF_MEMALLOC)
goto nopage;
@@ -338,19 +338,21 @@ out:
*/
int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
{
static const size_t tvlv_padding = sizeof(__be32);
struct batadv_elp_packet *elp_packet;
unsigned char *elp_buff;
u32 random_seqno;
size_t size;
int res = -ENOMEM;

size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
if (!hard_iface->bat_v.elp_skb)
goto out;

skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
BATADV_ELP_HLEN + tvlv_padding);
elp_packet = (struct batadv_elp_packet *)elp_buff;

elp_packet->packet_type = BATADV_ELP;

@@ -274,7 +274,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
kfree(entry);

packet = (struct batadv_frag_packet *)skb_out->data;
size = ntohs(packet->total_size);
size = ntohs(packet->total_size) + hdr_size;

/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
@@ -1032,6 +1032,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
if (local->open_count == 0)
ieee80211_clear_tx_pending(local);

sdata->vif.bss_conf.beacon_int = 0;

/*
* If the interface goes down while suspended, presumably because
* the device was unplugged and that happens before our resume,

@@ -1254,6 +1254,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
return RX_CONTINUE;

if (ieee80211_is_ctl(hdr->frame_control) ||
ieee80211_is_nullfunc(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
return RX_CONTINUE;

@@ -953,6 +953,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
/* Track when last TDLS packet was ACKed */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
sta->status_stats.last_tdls_pkt_time = jiffies;
} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
return;
} else {
ieee80211_lost_packet(sta, info);
}

@@ -435,8 +435,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
info->hw_queue = tx->sdata->vif.cab_queue;

/* no stations in PS mode */
if (!atomic_read(&ps->num_sta_ps))
/* no stations in PS mode and no buffered packets */
if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
return TX_CONTINUE;

info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
@@ -1736,6 +1736,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp)
for (i=0; i < rqstp->rq_enc_pages_num; i++)
__free_page(rqstp->rq_enc_pages[i]);
kfree(rqstp->rq_enc_pages);
rqstp->rq_release_snd_buf = NULL;
}

static int
@@ -1744,6 +1745,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
int first, last, i;

if (rqstp->rq_release_snd_buf)
rqstp->rq_release_snd_buf(rqstp);

if (snd_buf->page_len == 0) {
rqstp->rq_enc_pages_num = 0;
return 0;
@@ -36,6 +36,7 @@
#include <sound/timer.h>
#include <sound/minors.h>
#include <linux/uio.h>
#include <linux/delay.h>

#include "pcm_local.h"

@@ -91,12 +92,12 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
* and this may lead to a deadlock when the code path takes read sem
* twice (e.g. one in snd_pcm_action_nonatomic() and another in
* snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
* spin until it gets the lock.
* sleep until all the readers are completed without blocking by writer.
*/
static inline void down_write_nonblock(struct rw_semaphore *lock)
static inline void down_write_nonfifo(struct rw_semaphore *lock)
{
while (!down_write_trylock(lock))
cond_resched();
msleep(1);
}

/**
@@ -1935,7 +1936,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
res = -ENOMEM;
goto _nolock;
}
down_write_nonblock(&snd_pcm_link_rwsem);
down_write_nonfifo(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
substream->runtime->status->state != substream1->runtime->status->state ||
@@ -1982,7 +1983,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
struct snd_pcm_substream *s;
int res = 0;

down_write_nonblock(&snd_pcm_link_rwsem);
down_write_nonfifo(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (!snd_pcm_stream_linked(substream)) {
res = -EALREADY;
@@ -2337,7 +2338,8 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)

static void pcm_release_private(struct snd_pcm_substream *substream)
{
snd_pcm_unlink(substream);
if (snd_pcm_stream_linked(substream))
snd_pcm_unlink(substream);
}

void snd_pcm_release_substream(struct snd_pcm_substream *substream)
@@ -2513,6 +2513,10 @@ static const struct pci_device_id azx_ids[] = {
/* AMD Hudson */
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* AMD Stoney */
{ PCI_DEVICE(0x1022, 0x157a),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
AZX_DCAPS_PM_RUNTIME },
/* AMD Raven */
{ PCI_DEVICE(0x1022, 0x15e3),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
@@ -4863,9 +4863,18 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
{ 0x19, 0x21a11010 }, /* dock mic */
{ }
};
/* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise
* the speaker output becomes too low by some reason on Thinkpads with
* ALC298 codec
*/
static hda_nid_t preferred_pairs[] = {
0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
0
};
struct alc_spec *spec = codec->spec;

if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->gen.preferred_dacs = preferred_pairs;
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
snd_hda_apply_pincfgs(codec, pincfgs);
} else if (action == HDA_FIXUP_ACT_INIT) {
@@ -644,9 +644,12 @@ static int usb_audio_probe(struct usb_interface *intf,

__error:
if (chip) {
/* chip->active is inside the chip->card object,
* decrement before memory is possibly returned.
*/
atomic_dec(&chip->active);
if (!chip->num_interfaces)
snd_card_free(chip->card);
atomic_dec(&chip->active);
}
mutex_unlock(&register_mutex);
return err;