Merge 4.9.292 into android-4.9-q

Changes in 4.9.292
	staging: ion: Prevent incorrect reference counting behaviour
	USB: serial: option: add Telit LE910S1 0x9200 composition
	USB: serial: option: add Fibocom FM101-GL variants
	usb: hub: Fix usb enumeration issue due to address0 race
	usb: hub: Fix locking issues with address0_mutex
	binder: fix test regression due to sender_euid change
	ALSA: ctxfi: Fix out-of-range access
	staging: rtl8192e: Fix use after free in _rtl92e_pci_disconnect()
	fuse: fix page stealing
	xen: don't continue xenstore initialization in case of errors
	xen: detect uninitialized xenbus in xenbus_init
	tracing: Fix pid filtering when triggers are attached
	ARM: dts: BCM5301X: Add interrupt properties to GPIO node
	ASoC: topology: Add missing rwsem around snd_ctl_remove() calls
	net: ieee802154: handle iftypes as u32
	NFSv42: Don't fail clone() unless the OP_CLONE operation failed
	ARM: socfpga: Fix crash with CONFIG_FORTIFY_SOURCE
	scsi: mpt3sas: Fix kernel panic during drive powercycle test
	drm/vc4: fix error code in vc4_create_object()
	PM: hibernate: use correct mode for swsusp_close()
	tcp_cubic: fix spurious Hystart ACK train detections for not-cwnd-limited flows
	tracing: Check pid filtering when creating events
	hugetlbfs: flush TLBs correctly after huge_pmd_unshare
	vhost/vsock: fix incorrect used length reported to the guest
	proc/vmcore: fix clearing user buffer by properly using clear_user()
	NFC: add NCI_UNREG flag to eliminate the race
	fuse: release pipe buf after last use
	xen: sync include/xen/interface/io/ring.h with Xen's newest version
	xen/blkfront: read response from backend only once
	xen/blkfront: don't take local copy of a request from the ring page
	xen/blkfront: don't trust the backend response data blindly
	xen/netfront: read response from backend only once
	xen/netfront: don't read data from request on the ring page
	xen/netfront: disentangle tx_skb_freelist
	xen/netfront: don't trust the backend response data blindly
	tty: hvc: replace BUG_ON() with negative return value
	shm: extend forced shm destroy to support objects from several IPC nses
	NFSv42: Fix pagecache invalidation after COPY/CLONE
	hugetlb: take PMD sharing into account when flushing tlb/caches
	net: return correct error code
	platform/x86: thinkpad_acpi: Fix WWAN device disabled issue after S3 deep
	s390/setup: avoid using memblock_enforce_memory_limit
	thermal: core: Reset previous low and high trip during thermal zone init
	scsi: iscsi: Unblock session then wake up error handler
	ethernet: hisilicon: hns: hns_dsaf_misc: fix a possible array overflow in hns_dsaf_ge_srst_by_port()
	net: tulip: de4x5: fix the problem that the array 'lp->phy[8]' may be out of bound
	net: ethernet: dec: tulip: de4x5: fix possible array overflows in type3_infoblock()
	vrf: Reset IPCB/IP6CB when processing outbound pkts in vrf dev xmit
	kprobes: Limit max data_size of the kretprobe instances
	sata_fsl: fix UAF in sata_fsl_port_stop when rmmod sata_fsl
	sata_fsl: fix warning in remove_proc_entry when rmmod sata_fsl
	fs: add fget_many() and fput_many()
	fget: check that the fd still exists after getting a ref to it
	natsemi: xtensa: fix section mismatch warnings
	net: qlogic: qlcnic: Fix a NULL pointer dereference in qlcnic_83xx_add_rings()
	siphash: use _unaligned version by default
	net/rds: correct socket tunable error in rds_tcp_tune()
	parisc: Fix "make install" on newer debian releases
	vgacon: Propagate console boot parameters before calling `vc_resize'
	tty: serial: msm_serial: Deactivate RX DMA for polling support
	serial: pl011: Add ACPI SBSA UART match id
	serial: core: fix transmit-buffer reset and memleak
	Linux 4.9.292

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I98a677406cfca6a63fffa4a94e45edbd45fd671a
Author: Greg Kroah-Hartman
Date:   2021-12-08 09:05:25 +01:00

70 changed files with 970 additions and 484 deletions

@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 291
+SUBLEVEL = 292
EXTRAVERSION =
NAME = Roaring Lionus


@@ -234,6 +234,8 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
usb2: usb2@21000 {


@@ -278,6 +278,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
tlb_add_flush(tlb, addr);
}
static inline void
tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
unsigned long size)
{
tlb_add_flush(tlb, address);
tlb_add_flush(tlb, address + size - PMD_SIZE);
}
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)


@@ -48,7 +48,7 @@ extern void __iomem *sdr_ctl_base_addr;
u32 socfpga_sdram_self_refresh(u32 sdr_base);
extern unsigned int socfpga_sdram_self_refresh_sz;
-extern char secondary_trampoline, secondary_trampoline_end;
+extern char secondary_trampoline[], secondary_trampoline_end[];
extern unsigned long socfpga_cpu1start_addr;


@@ -31,14 +31,14 @@
static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
-	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+	int trampoline_size = secondary_trampoline_end - secondary_trampoline;
if (socfpga_cpu1start_addr) {
/* This will put CPU #1 into reset. */
writel(RSTMGR_MPUMODRST_CPU1,
rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
-		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
writel(virt_to_phys(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
@@ -56,12 +56,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
-	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+	int trampoline_size = secondary_trampoline_end - secondary_trampoline;
if (socfpga_cpu1start_addr) {
writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
SOCFPGA_A10_RSTMGR_MODMPURST);
-		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
writel(virt_to_phys(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));


@@ -272,6 +272,16 @@ __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long addre
tlb->end_addr = address + PAGE_SIZE;
}
static inline void
tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
unsigned long size)
{
if (tlb->start_addr > address)
tlb->start_addr = address;
if (tlb->end_addr < address + size)
tlb->end_addr = address + size;
}
#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
#define tlb_start_vma(tlb, vma) do { } while (0)


@@ -39,6 +39,7 @@ verify "$3"
if [ -n "${INSTALLKERNEL}" ]; then
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
fi
# Default install


@@ -116,6 +116,20 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
return tlb_remove_page(tlb, page);
}
static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
unsigned long address, unsigned long size)
{
/*
* the range might exceed the original range that was provided to
* tlb_gather_mmu(), so we need to update it despite the fact it is
* usually not updated.
*/
if (tlb->start > address)
tlb->start = address;
if (tlb->end < address + size)
tlb->end = address + size;
}
/*
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.


@@ -693,9 +693,6 @@ static void __init setup_memory(void)
storage_key_init_range(reg->base, reg->base + reg->size);
}
psw_set_key(PAGE_DEFAULT_KEY);
-	/* Only cosmetics */
-	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}
/*


@@ -115,6 +115,16 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
return __tlb_remove_page(tlb, page);
}
static inline void
tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
unsigned long size)
{
if (tlb->start > address)
tlb->start = address;
if (tlb->end < address + size)
tlb->end = address + size;
}
static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
struct page *page)
{


@@ -128,6 +128,18 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
return tlb_remove_page(tlb, page);
}
static inline void
tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
unsigned long size)
{
tlb->need_flush = 1;
if (tlb->start > address)
tlb->start = address;
if (tlb->end < address + size)
tlb->end = address + size;
}
/**
* tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
*


@@ -1406,6 +1406,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
return 0;
}
static void sata_fsl_host_stop(struct ata_host *host)
{
struct sata_fsl_host_priv *host_priv = host->private_data;
iounmap(host_priv->hcr_base);
kfree(host_priv);
}
/*
* scsi mid-layer and libata interface structures
*/
@@ -1438,6 +1446,8 @@ static struct ata_port_operations sata_fsl_ops = {
.port_start = sata_fsl_port_start,
.port_stop = sata_fsl_port_stop,
.host_stop = sata_fsl_host_stop,
.pmp_attach = sata_fsl_pmp_attach,
.pmp_detach = sata_fsl_pmp_detach,
};
@@ -1492,9 +1502,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
host_priv->ssr_base = ssr_base;
host_priv->csr_base = csr_base;
-	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-	if (!irq) {
-		dev_err(&ofdev->dev, "invalid irq from platform\n");
+	irq = platform_get_irq(ofdev, 0);
+	if (irq < 0) {
+		retval = irq;
goto error_exit_with_cleanup;
}
host_priv->irq = irq;
@@ -1571,10 +1581,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
ata_host_detach(host);
irq_dispose_mapping(host_priv->irq);
-	iounmap(host_priv->hcr_base);
-	kfree(host_priv);
return 0;
}


@@ -78,6 +78,7 @@ enum blkif_state {
BLKIF_STATE_DISCONNECTED,
BLKIF_STATE_CONNECTED,
BLKIF_STATE_SUSPENDED,
BLKIF_STATE_ERROR,
};
struct grant {
@@ -87,6 +88,7 @@ struct grant {
};
enum blk_req_status {
REQ_PROCESSING,
REQ_WAITING,
REQ_DONE,
REQ_ERROR,
@@ -529,10 +531,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
id = get_id_from_freelist(rinfo);
rinfo->shadow[id].request = req;
-	rinfo->shadow[id].status = REQ_WAITING;
+	rinfo->shadow[id].status = REQ_PROCESSING;
rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
-	(*ring_req)->u.rw.id = id;
+	rinfo->shadow[id].req.u.rw.id = id;
return id;
}
@@ -540,11 +542,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
struct blkfront_info *info = rinfo->dev_info;
-	struct blkif_request *ring_req;
+	struct blkif_request *ring_req, *final_ring_req;
unsigned long id;
/* Fill out a communications ring structure. */
-	id = blkif_ring_get_request(rinfo, req, &ring_req);
+	id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+	ring_req = &rinfo->shadow[id].req;
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
@@ -555,8 +558,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
else
ring_req->u.discard.flag = 0;
-	/* Keep a private copy so we can reissue requests when recovering. */
-	rinfo->shadow[id].req = *ring_req;
+	/* Copy the request to the ring page. */
+	*final_ring_req = *ring_req;
+	rinfo->shadow[id].status = REQ_WAITING;
return 0;
}
@@ -689,6 +693,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
{
struct blkfront_info *info = rinfo->dev_info;
struct blkif_request *ring_req, *extra_ring_req = NULL;
struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
unsigned long id, extra_id = NO_ASSOCIATED_ID;
bool require_extra_req = false;
int i;
@@ -730,7 +735,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
}
/* Fill out a communications ring structure. */
-	id = blkif_ring_get_request(rinfo, req, &ring_req);
+	id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+	ring_req = &rinfo->shadow[id].req;
num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
num_grant = 0;
@@ -781,7 +787,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
ring_req->u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
 			extra_id = blkif_ring_get_request(rinfo, req,
-							  &extra_ring_req);
+							  &final_extra_ring_req);
+			extra_ring_req = &rinfo->shadow[extra_id].req;
/*
* Only the first request contains the scatter-gather
* list.
@@ -823,10 +831,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
if (setup.segments)
kunmap_atomic(setup.segments);
-	/* Keep a private copy so we can reissue requests when recovering. */
-	rinfo->shadow[id].req = *ring_req;
-	if (unlikely(require_extra_req))
-		rinfo->shadow[extra_id].req = *extra_ring_req;
+	/* Copy request(s) to the ring page. */
+	*final_ring_req = *ring_req;
+	rinfo->shadow[id].status = REQ_WAITING;
+	if (unlikely(require_extra_req)) {
+		*final_extra_ring_req = *extra_ring_req;
+		rinfo->shadow[extra_id].status = REQ_WAITING;
+	}
if (max_grefs > 0)
gnttab_free_grant_references(setup.gref_head);
@@ -1396,8 +1407,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
static int blkif_get_final_status(enum blk_req_status s1,
enum blk_req_status s2)
{
-	BUG_ON(s1 == REQ_WAITING);
-	BUG_ON(s2 == REQ_WAITING);
+	BUG_ON(s1 < REQ_DONE);
+	BUG_ON(s2 < REQ_DONE);
if (s1 == REQ_ERROR || s2 == REQ_ERROR)
return BLKIF_RSP_ERROR;
@@ -1430,7 +1441,7 @@ static bool blkif_completion(unsigned long *id,
s->status = blkif_rsp_to_req_status(bret->status);
/* Wait the second response if not yet here. */
-		if (s2->status == REQ_WAITING)
+		if (s2->status < REQ_DONE)
return 0;
bret->status = blkif_get_final_status(s->status,
@@ -1538,7 +1549,7 @@ static bool blkif_completion(unsigned long *id,
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
struct request *req;
-	struct blkif_response *bret;
+	struct blkif_response bret;
RING_IDX i, rp;
unsigned long flags;
struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
@@ -1550,50 +1561,72 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&rinfo->ring_lock, flags);
again:
-	rp = rinfo->ring.sring->rsp_prod;
-	rmb(); /* Ensure we see queued responses up to 'rp'. */
+	rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
+	virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
pr_alert("%s: illegal number of responses %u\n",
info->gd->disk_name, rp - rinfo->ring.rsp_cons);
goto err;
}
for (i = rinfo->ring.rsp_cons; i != rp; i++) {
unsigned long id;
+		unsigned int op;
+
+		RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
+		id = bret.id;

-		bret = RING_GET_RESPONSE(&rinfo->ring, i);
-		id   = bret->id;
/*
* The backend has messed up and given us an id that we would
* never have given to it (we stamp it up to BLK_RING_SIZE -
* look in get_id_from_freelist.
*/
if (id >= BLK_RING_SIZE(info)) {
-			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
-			     info->gd->disk_name, op_name(bret->operation), id);
-			/* We can't safely get the 'struct request' as
-			 * the id is busted. */
-			continue;
+			pr_alert("%s: response has incorrect id (%ld)\n",
+				 info->gd->disk_name, id);
+			goto err;
 		}
+		if (rinfo->shadow[id].status != REQ_WAITING) {
+			pr_alert("%s: response references no pending request\n",
+				 info->gd->disk_name);
+			goto err;
+		}
+
+		rinfo->shadow[id].status = REQ_PROCESSING;
req = rinfo->shadow[id].request;
-		if (bret->operation != BLKIF_OP_DISCARD) {
+		op = rinfo->shadow[id].req.operation;
+		if (op == BLKIF_OP_INDIRECT)
+			op = rinfo->shadow[id].req.u.indirect.indirect_op;
+		if (bret.operation != op) {
+			pr_alert("%s: response has wrong operation (%u instead of %u)\n",
+				 info->gd->disk_name, bret.operation, op);
+			goto err;
+		}
+
+		if (bret.operation != BLKIF_OP_DISCARD) {
/*
* We may need to wait for an extra response if the
* I/O request is split in 2
*/
-			if (!blkif_completion(&id, rinfo, bret))
+			if (!blkif_completion(&id, rinfo, &bret))
continue;
}
if (add_id_to_freelist(rinfo, id)) {
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
-			     info->gd->disk_name, op_name(bret->operation), id);
+			     info->gd->disk_name, op_name(bret.operation), id);
continue;
}
-		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
-		switch (bret->operation) {
+		error = (bret.status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+		switch (bret.operation) {
case BLKIF_OP_DISCARD:
-			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+			if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
-				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
-				       info->gd->disk_name, op_name(bret->operation));
+				pr_warn_ratelimited("blkfront: %s: %s op failed\n",
+					   info->gd->disk_name, op_name(bret.operation));
error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
@@ -1604,15 +1637,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
-			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
-				       info->gd->disk_name, op_name(bret->operation));
+			if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
+				pr_warn_ratelimited("blkfront: %s: %s op failed\n",
+						    info->gd->disk_name, op_name(bret.operation));
error = -EOPNOTSUPP;
}
-			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+			if (unlikely(bret.status == BLKIF_RSP_ERROR &&
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
-				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
-				       info->gd->disk_name, op_name(bret->operation));
+				pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
+						    info->gd->disk_name, op_name(bret.operation));
error = -EOPNOTSUPP;
}
if (unlikely(error)) {
@@ -1625,9 +1658,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
/* fall through */
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
-			if (unlikely(bret->status != BLKIF_RSP_OKAY))
-				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
-					"request: %x\n", bret->status);
+			if (unlikely(bret.status != BLKIF_RSP_OKAY))
+				dev_dbg_ratelimited(&info->xbdev->dev,
+					"Bad return from blkdev data request: %#x\n",
+					bret.status);
blk_mq_complete_request(req, error);
break;
@@ -1651,6 +1685,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return IRQ_HANDLED;
err:
info->connected = BLKIF_STATE_ERROR;
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
pr_alert("%s disabled for further use\n", info->gd->disk_name);
return IRQ_HANDLED;
}
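The blkfront rework above applies one hardening rule throughout: a response that lives in a page shared with the backend is copied out exactly once (RING_COPY_RESPONSE) and only the private copy is validated and used, so the backend cannot change it between check and use. Below is a minimal userspace sketch of the same copy-then-validate pattern; the struct layout and names are illustrative, not the kernel's:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct resp { uint64_t id; int status; };

#define RING_SIZE 32

/* Copy the entry out of (potentially hostile) shared memory first,
 * then validate and use only the private copy. */
static int consume_response(volatile struct resp *shared_ring,
			    unsigned int idx, struct resp *out)
{
	memcpy(out, (const void *)&shared_ring[idx % RING_SIZE],
	       sizeof(*out));
	if (out->id >= RING_SIZE)	/* id is attacker-controlled */
		return -1;
	return 0;
}

int main(void)
{
	static struct resp ring[RING_SIZE];
	struct resp local;

	ring[0] = (struct resp){ .id = 7, .status = 0 };
	if (consume_response(ring, 0, &local) == 0)
		printf("response id=%llu status=%d\n",
		       (unsigned long long)local.id, local.status);
	return 0;
}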


@@ -198,7 +198,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
mutex_lock(&vc4->bo_lock);
vc4->bo_stats.num_allocated++;


@@ -4704,6 +4704,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
lp->ibn = 3;
lp->active = *p++;
if (MOTO_SROM_BUG) lp->active = 0;
/* if (MOTO_SROM_BUG) statement indicates lp->active could
* be 8 (i.e. the size of array lp->phy) */
if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
return -EINVAL;
lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
@@ -4995,19 +4999,23 @@ mii_get_phy(struct net_device *dev)
}
if ((j == limit) && (i < DE4X5_MAX_MII)) {
for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
-	    lp->phy[k].addr   = i;
-	    lp->phy[k].id     = id;
-	    lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register         */
-	    lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies   */
-	    lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex    */
-	    lp->mii_cnt++;
-	    lp->active++;
-	    printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
-	    j = de4x5_debug;
-	    de4x5_debug |= DEBUG_MII;
-	    de4x5_dbg_mii(dev, k);
-	    de4x5_debug = j;
-	    printk("\n");
+	    if (k < DE4X5_MAX_PHY) {
+		lp->phy[k].addr   = i;
+		lp->phy[k].id     = id;
+		lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register         */
+		lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies   */
+		lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex    */
+		lp->mii_cnt++;
+		lp->active++;
+		printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
+		j = de4x5_debug;
+		de4x5_debug |= DEBUG_MII;
+		de4x5_dbg_mii(dev, k);
+		de4x5_debug = j;
+		printk("\n");
+	    } else {
+		goto purgatory;
+	    }
}
}
purgatory:


@@ -312,6 +312,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
return;
if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
/* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
We need check to prevent array overflow */
if (port >= DSAF_MAX_PORT_NUM)
return;
reg_val_1 = 0x1 << port;
port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
/* there is difference between V1 and V2 in register.*/


@@ -128,7 +128,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
{
static unsigned version_printed = 0;
unsigned int silicon_revision;


@@ -1078,8 +1078,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
context_id = recv_ctx->context_id;
num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
-	ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
-				    QLCNIC_CMD_ADD_RCV_RINGS);
+	err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+					  QLCNIC_CMD_ADD_RCV_RINGS);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to alloc mbx args %d\n", err);
+		return err;
+	}
cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
/* set up status rings, mbx 2-81 */


@@ -226,6 +226,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
/* strip the ethernet header added for pass through VRF device */
__skb_pull(skb, skb_network_offset(skb));
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
ret = vrf_ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
dev->stats.tx_errors++;
@@ -332,6 +333,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
RT_SCOPE_LINK);
}
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
vrf_dev->stats.tx_errors++;


@@ -120,21 +120,17 @@ struct netfront_queue {
/*
* {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
-	 * are linked from tx_skb_freelist through skb_entry.link.
-	 *
-	 * NB. Freelist index entries are always going to be less than
-	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
-	 * greater than PAGE_OFFSET: we use this property to distinguish
-	 * them.
+	 * are linked from tx_skb_freelist through tx_link.
*/
-	union skb_entry {
-		struct sk_buff *skb;
-		unsigned long link;
-	} tx_skbs[NET_TX_RING_SIZE];
+	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
+	unsigned short tx_link[NET_TX_RING_SIZE];
+#define TX_LINK_NONE 0xffff
+#define TX_PENDING   0xfffe
grant_ref_t gref_tx_head;
grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
struct page *grant_tx_page[NET_TX_RING_SIZE];
unsigned tx_skb_freelist;
unsigned int tx_pend_queue;
spinlock_t rx_lock ____cacheline_aligned_in_smp;
struct xen_netif_rx_front_ring rx;
@@ -160,6 +156,9 @@ struct netfront_info {
struct netfront_stats __percpu *rx_stats;
struct netfront_stats __percpu *tx_stats;
/* Is device behaving sane? */
bool broken;
atomic_t rx_gso_checksum_fixup;
};
@@ -168,33 +167,25 @@ struct netfront_rx_info {
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
-static void skb_entry_set_link(union skb_entry *list, unsigned short id)
-{
-	list->link = id;
-}
-
-static int skb_entry_is_link(const union skb_entry *list)
-{
-	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
-	return (unsigned long)list->skb < PAGE_OFFSET;
-}
-
 /*
  * Access macros for acquiring freeing slots in tx_skbs[].
  */

-static void add_id_to_freelist(unsigned *head, union skb_entry *list,
-			       unsigned short id)
+static void add_id_to_list(unsigned *head, unsigned short *list,
+			   unsigned short id)
 {
-	skb_entry_set_link(&list[id], *head);
+	list[id] = *head;
 	*head = id;
 }

-static unsigned short get_id_from_freelist(unsigned *head,
-					   union skb_entry *list)
+static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
 {
 	unsigned int id = *head;

-	*head = list[id].link;
+	if (id != TX_LINK_NONE) {
+		*head = list[id];
+		list[id] = TX_LINK_NONE;
+	}
 	return id;
 }
@@ -352,7 +343,7 @@ static int xennet_open(struct net_device *dev)
unsigned int i = 0;
struct netfront_queue *queue = NULL;
-	if (!np->queues)
+	if (!np->queues || np->broken)
return -ENODEV;
for (i = 0; i < num_queues; ++i) {
@@ -380,27 +371,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
unsigned short id;
struct sk_buff *skb;
bool more_to_do;
const struct device *dev = &queue->info->netdev->dev;
BUG_ON(!netif_carrier_ok(queue->info->netdev));
do {
prod = queue->tx.sring->rsp_prod;
if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
dev_alert(dev, "Illegal number of responses %u\n",
prod - queue->tx.rsp_cons);
goto err;
}
rmb(); /* Ensure we see responses up to 'rp'. */
for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
-			struct xen_netif_tx_response *txrsp;
+			struct xen_netif_tx_response txrsp;

-			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
-			if (txrsp->status == XEN_NETIF_RSP_NULL)
+			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+			if (txrsp.status == XEN_NETIF_RSP_NULL)
 				continue;

-			id  = txrsp->id;
-			skb = queue->tx_skbs[id].skb;
+			id = txrsp.id;
if (id >= RING_SIZE(&queue->tx)) {
dev_alert(dev,
"Response has incorrect id (%u)\n",
id);
goto err;
}
if (queue->tx_link[id] != TX_PENDING) {
dev_alert(dev,
"Response for inactive request\n");
goto err;
}
queue->tx_link[id] = TX_LINK_NONE;
skb = queue->tx_skbs[id];
queue->tx_skbs[id] = NULL;
if (unlikely(gnttab_query_foreign_access(
queue->grant_tx_ref[id]) != 0)) {
-				pr_alert("%s: warning -- grant still in use by backend domain\n",
-					 __func__);
-				BUG();
+				dev_alert(dev,
+					  "Grant still in use by backend domain\n");
+				goto err;
}
gnttab_end_foreign_access_ref(
queue->grant_tx_ref[id], GNTMAP_readonly);
@@ -408,7 +419,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
&queue->gref_tx_head, queue->grant_tx_ref[id]);
queue->grant_tx_ref[id] = GRANT_INVALID_REF;
queue->grant_tx_page[id] = NULL;
-		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
+		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
dev_kfree_skb_irq(skb);
}
@@ -418,13 +429,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
} while (more_to_do);
xennet_maybe_wake_tx(queue);
return;
err:
queue->info->broken = true;
dev_alert(dev, "Disabled for further use\n");
}
struct xennet_gnttab_make_txreq {
struct netfront_queue *queue;
struct sk_buff *skb;
struct page *page;
-	struct xen_netif_tx_request *tx;      /* Last request */
+	struct xen_netif_tx_request *tx;      /* Last request on ring page */
+	struct xen_netif_tx_request tx_local; /* Last request local copy*/
unsigned int size;
};
@@ -440,7 +458,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
struct netfront_queue *queue = info->queue;
struct sk_buff *skb = info->skb;
-	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
@@ -448,34 +466,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
gfn, GNTMAP_readonly);
-	queue->tx_skbs[id].skb = skb;
+	queue->tx_skbs[id] = skb;
 	queue->grant_tx_page[id] = page;
 	queue->grant_tx_ref[id] = ref;

-	tx->id = id;
-	tx->gref = ref;
-	tx->offset = offset;
-	tx->size = len;
-	tx->flags = 0;
+	info->tx_local.id = id;
+	info->tx_local.gref = ref;
+	info->tx_local.offset = offset;
+	info->tx_local.size = len;
+	info->tx_local.flags = 0;
+
+	*tx = info->tx_local;
/*
* Put the request in the pending queue, it will be set to be pending
* when the producer index is about to be raised.
*/
add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
info->tx = tx;
-	info->size += tx->size;
+	info->size += info->tx_local.size;
}
static struct xen_netif_tx_request *xennet_make_first_txreq(
-	struct netfront_queue *queue, struct sk_buff *skb,
-	struct page *page, unsigned int offset, unsigned int len)
+	struct xennet_gnttab_make_txreq *info,
+	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.page = page,
-		.size = 0,
-	};
+	info->size = 0;

-	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
+	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);

-	return info.tx;
+	return info->tx;
 }
static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
@@ -488,35 +509,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
xennet_tx_setup_grant(gfn, offset, len, data);
}
-static struct xen_netif_tx_request *xennet_make_txreqs(
-	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
-	struct sk_buff *skb, struct page *page,
+static void xennet_make_txreqs(
+	struct xennet_gnttab_make_txreq *info,
+	struct page *page,
 	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.tx = tx,
-	};
-
 	/* Skip unused frames from start of page */
 	page += offset >> PAGE_SHIFT;
 	offset &= ~PAGE_MASK;

 	while (len) {
-		info.page = page;
-		info.size = 0;
+		info->page = page;
+		info->size = 0;

 		gnttab_foreach_grant_in_range(page, offset, len,
 					      xennet_make_one_txreq,
-					      &info);
+					      info);

 		page++;
 		offset = 0;
-		len -= info.size;
+		len -= info->size;
 	}
-
-	return info.tx;
 }
/*
@@ -563,13 +576,22 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
return queue_idx;
}
static void xennet_mark_tx_pending(struct netfront_queue *queue)
{
unsigned int i;
while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
TX_LINK_NONE)
queue->tx_link[i] = TX_PENDING;
}
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
-	struct xen_netif_tx_request *tx, *first_tx;
+	struct xen_netif_tx_request *first_tx;
unsigned int i;
int notify;
int slots;
@@ -578,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int len;
unsigned long flags;
struct netfront_queue *queue = NULL;
struct xennet_gnttab_make_txreq info = { };
unsigned int num_queues = dev->real_num_tx_queues;
u16 queue_index;
struct sk_buff *nskb;
@@ -585,6 +608,8 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Drop the packet if no queues are set up */
if (num_queues < 1)
goto drop;
if (unlikely(np->broken))
goto drop;
/* Determine which queue to transmit this SKB on */
queue_index = skb_get_queue_mapping(skb);
queue = &np->queues[queue_index];
@@ -635,21 +660,24 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* First request for the linear area. */
-	first_tx = tx = xennet_make_first_txreq(queue, skb,
-						page, offset, len);
-	offset += tx->size;
+	info.queue = queue;
+	info.skb = skb;
+	info.page = page;
+	first_tx = xennet_make_first_txreq(&info, offset, len);
+	offset += info.tx_local.size;
if (offset == PAGE_SIZE) {
page++;
offset = 0;
}
-	len -= tx->size;
+	len -= info.tx_local.size;
if (skb->ip_summed == CHECKSUM_PARTIAL)
/* local packet? */
-		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_csum_blank |
+				   XEN_NETTXF_data_validated;
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
/* remote but checksummed. */
-		tx->flags |= XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_data_validated;
/* Optional extra info after the first request. */
if (skb_shinfo(skb)->gso_size) {
@@ -658,7 +686,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
gso = (struct xen_netif_extra_info *)
RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
-		tx->flags |= XEN_NETTXF_extra_info;
+		first_tx->flags |= XEN_NETTXF_extra_info;
gso->u.gso.size = skb_shinfo(skb)->gso_size;
gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
@@ -672,19 +700,21 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Requests for the rest of the linear area. */
-	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+	xennet_make_txreqs(&info, page, offset, len);
/* Requests for all the frags. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		tx = xennet_make_txreqs(queue, tx, skb,
-					skb_frag_page(frag), frag->page_offset,
+		xennet_make_txreqs(&info, skb_frag_page(frag),
+				   frag->page_offset,
skb_frag_size(frag));
}
/* First request has the packet length. */
first_tx->size = skb->len;
xennet_mark_tx_pending(queue);
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
notify_remote_via_irq(queue->tx_irq);
@@ -742,7 +772,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
RING_IDX rp)
{
-	struct xen_netif_extra_info *extra;
+	struct xen_netif_extra_info extra;
struct device *dev = &queue->info->netdev->dev;
RING_IDX cons = queue->rx.rsp_cons;
int err = 0;
@@ -758,24 +788,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
break;
}
-		extra = (struct xen_netif_extra_info *)
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);

-		if (unlikely(!extra->type ||
-			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+		if (unlikely(!extra.type ||
+			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			if (net_ratelimit())
 				dev_warn(dev, "Invalid extra type: %d\n",
-					extra->type);
+					 extra.type);
 			err = -EINVAL;
 		} else {
-			memcpy(&extras[extra->type - 1], extra,
-			       sizeof(*extra));
+			extras[extra.type - 1] = extra;
 		}

 		skb = xennet_get_rx_skb(queue, cons);
 		ref = xennet_get_rx_ref(queue, cons);
 		xennet_move_rx_slot(queue, skb, ref);
-	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
queue->rx.rsp_cons = cons;
return err;
@@ -785,7 +813,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
struct netfront_rx_info *rinfo, RING_IDX rp,
struct sk_buff_head *list)
{
struct xen_netif_rx_response *rx = &rinfo->rx;
struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
struct xen_netif_extra_info *extras = rinfo->extras;
struct device *dev = &queue->info->netdev->dev;
RING_IDX cons = queue->rx.rsp_cons;
@@ -843,7 +871,8 @@ next:
break;
}
-		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
+		rx = &rx_local;
skb = xennet_get_rx_skb(queue, cons + slots);
ref = xennet_get_rx_ref(queue, cons + slots);
slots++;
@@ -898,10 +927,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *nskb;
while ((nskb = __skb_dequeue(list))) {
-		struct xen_netif_rx_response *rx =
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		struct xen_netif_rx_response rx;
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

+		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
@@ -916,7 +946,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(nfrag),
-				rx->offset, rx->status, PAGE_SIZE);
+				rx.offset, rx.status, PAGE_SIZE);
skb_shinfo(nskb)->nr_frags = 0;
kfree_skb(nskb);
@@ -1009,12 +1039,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
skb_queue_head_init(&tmpq);
rp = queue->rx.sring->rsp_prod;
if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
dev_alert(&dev->dev, "Illegal number of responses %u\n",
rp - queue->rx.rsp_cons);
queue->info->broken = true;
spin_unlock(&queue->rx_lock);
return 0;
}
rmb(); /* Ensure we see queued responses up to 'rp'. */
i = queue->rx.rsp_cons;
work_done = 0;
while ((i != rp) && (work_done < budget)) {
-		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+		RING_COPY_RESPONSE(&queue->rx, i, rx);
memset(extras, 0, sizeof(rinfo.extras));
err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
@@ -1138,17 +1175,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
for (i = 0; i < NET_TX_RING_SIZE; i++) {
/* Skip over entries which are actually freelist references */
-		if (skb_entry_is_link(&queue->tx_skbs[i]))
+		if (!queue->tx_skbs[i])
 			continue;

-		skb = queue->tx_skbs[i].skb;
+		skb = queue->tx_skbs[i];
+		queue->tx_skbs[i] = NULL;
get_page(queue->grant_tx_page[i]);
gnttab_end_foreign_access(queue->grant_tx_ref[i],
GNTMAP_readonly,
(unsigned long)page_address(queue->grant_tx_page[i]));
queue->grant_tx_page[i] = NULL;
queue->grant_tx_ref[i] = GRANT_INVALID_REF;
-		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
+		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
dev_kfree_skb_irq(skb);
}
}
@@ -1248,6 +1286,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
struct netfront_queue *queue = dev_id;
unsigned long flags;
if (queue->info->broken)
return IRQ_HANDLED;
spin_lock_irqsave(&queue->tx_lock, flags);
xennet_tx_buf_gc(queue);
spin_unlock_irqrestore(&queue->tx_lock, flags);
@@ -1260,6 +1301,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
struct netfront_queue *queue = dev_id;
struct net_device *dev = queue->info->netdev;
if (queue->info->broken)
return IRQ_HANDLED;
if (likely(netif_carrier_ok(dev) &&
RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
napi_schedule(&queue->napi);
@@ -1281,6 +1325,10 @@ static void xennet_poll_controller(struct net_device *dev)
struct netfront_info *info = netdev_priv(dev);
unsigned int num_queues = dev->real_num_tx_queues;
unsigned int i;
if (info->broken)
return;
for (i = 0; i < num_queues; ++i)
xennet_interrupt(0, &info->queues[i]);
}
@@ -1649,13 +1697,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
devid, queue->id);
-	/* Initialise tx_skbs as a free chain containing every entry. */
+	/* Initialise tx_skb_freelist as a free chain containing every entry. */
 	queue->tx_skb_freelist = 0;
+	queue->tx_pend_queue = TX_LINK_NONE;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
-		skb_entry_set_link(&queue->tx_skbs[i], i+1);
+		queue->tx_link[i] = i + 1;
queue->grant_tx_ref[i] = GRANT_INVALID_REF;
queue->grant_tx_page[i] = NULL;
}
queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
/* Clear out rx_skbs */
for (i = 0; i < NET_RX_RING_SIZE; i++) {
@@ -1865,6 +1915,9 @@ static int talk_to_netback(struct xenbus_device *dev,
if (info->queues)
xennet_destroy_queues(info);
/* For the case of a reconnect reset the "broken" indicator. */
info->broken = false;
err = xennet_create_queues(info, &num_queues);
if (err < 0) {
xenbus_dev_fatal(dev, err, "creating queues");
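The tx_skbs/tx_link rework above replaces the old pointer-vs-index union (which told the two cases apart by comparing against PAGE_OFFSET) with an explicit index-linked freelist plus TX_LINK_NONE/TX_PENDING sentinels. The data structure in isolation, as a standalone sketch with illustrative names and sizes:

#include <stdio.h>

#define RING 8
#define LINK_NONE 0xffffu

static unsigned short link[RING];
static unsigned int freelist;

static void put_id(unsigned int id)
{
	link[id] = freelist;	/* push slot onto the free chain */
	freelist = id;
}

static unsigned int get_id(void)
{
	unsigned int id = freelist;

	if (id != LINK_NONE) {
		freelist = link[id];
		link[id] = LINK_NONE;	/* mark slot as in use */
	}
	return id;
}

int main(void)
{
	unsigned int i, a, b;

	freelist = 0;
	for (i = 0; i < RING; i++)
		link[i] = i + 1;
	link[RING - 1] = LINK_NONE;

	a = get_id();
	b = get_id();
	printf("got %u, got %u\n", a, b);
	put_id(a);
	return 0;
}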


@@ -1169,15 +1169,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
return status;
}
-/* Query FW and update rfkill sw state for all rfkill switches */
-static void tpacpi_rfk_update_swstate_all(void)
-{
-	unsigned int i;
-
-	for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
-		tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
-}
/*
* Sync the HW-blocking state of all rfkill switches,
* do notice it causes the rfkill core to schedule uevents
@@ -3029,9 +3020,6 @@ static void tpacpi_send_radiosw_update(void)
if (wlsw == TPACPI_RFK_RADIO_OFF)
tpacpi_rfk_update_hwblock_state(true);
-	/* Sync sw blocking state */
-	tpacpi_rfk_update_swstate_all();
/* Sync hw blocking state last if it is hw-unblocked */
if (wlsw == TPACPI_RFK_RADIO_ON)
tpacpi_rfk_update_hwblock_state(false);


@@ -2927,7 +2927,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
-		if (!sas_device_priv_data)
+		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
continue;
if (sas_device_priv_data->sas_target->sas_address
!= sas_address)


@@ -1898,12 +1898,12 @@ static void session_recovery_timedout(struct work_struct *work)
}
spin_unlock_irqrestore(&session->lock, flags);
-	if (session->transport->session_recovery_timedout)
-		session->transport->session_recovery_timedout(session);
-
 	ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
 	scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
 	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
+
+	if (session->transport->session_recovery_timedout)
+		session->transport->session_recovery_timedout(session);
}
static void __iscsi_unblock_session(struct work_struct *work)


@@ -489,6 +489,9 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
void *vaddr;
if (buffer->kmap_cnt) {
if (buffer->kmap_cnt == INT_MAX)
return ERR_PTR(-EOVERFLOW);
buffer->kmap_cnt++;
return buffer->vaddr;
}
@@ -509,6 +512,9 @@ static void *ion_handle_kmap_get(struct ion_handle *handle)
void *vaddr;
if (handle->kmap_cnt) {
if (handle->kmap_cnt == INT_MAX)
return ERR_PTR(-EOVERFLOW);
handle->kmap_cnt++;
return buffer->vaddr;
}


@@ -2710,13 +2710,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
free_irq(dev->irq, dev);
priv->irq = 0;
}
-		free_rtllib(dev);
-
 		if (dev->mem_start != 0) {
 			iounmap((void __iomem *)dev->mem_start);
 			release_mem_region(pci_resource_start(pdev, 1),
 					   pci_resource_len(pdev, 1));
 		}
+
+		free_rtllib(dev);
} else {
priv = rtllib_priv(dev);
}


@@ -601,6 +601,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
{
struct thermal_instance *pos;
tz->temperature = THERMAL_TEMP_INVALID;
tz->prev_low_trip = -INT_MAX;
tz->prev_high_trip = INT_MAX;
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
pos->initialized = false;
}


@@ -99,7 +99,11 @@ static int __write_console(struct xencons_info *xencons,
cons = intf->out_cons;
prod = intf->out_prod;
mb(); /* update queue values before going on */
-	BUG_ON((prod - cons) > sizeof(intf->out));
+	if ((prod - cons) > sizeof(intf->out)) {
+		pr_err_once("xencons: Illegal ring page indices");
+		return -EINVAL;
+	}
while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
@@ -127,7 +131,10 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
*/
while (len) {
int sent = __write_console(cons, data, len);
if (sent < 0)
return sent;
data += sent;
len -= sent;
@@ -151,7 +158,11 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
cons = intf->in_cons;
prod = intf->in_prod;
mb(); /* get pointers before reading ring */
-	BUG_ON((prod - cons) > sizeof(intf->in));
+	if ((prod - cons) > sizeof(intf->in)) {
+		pr_err_once("xencons: Illegal ring page indices");
+		return -EINVAL;
+	}
while (cons != prod && recv < len)
buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
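The hvc change above follows the same policy as the blkfront and netfront patches in this series: indices read back from a shared ring page are untrusted, and an out-of-range value should fail the operation instead of BUG()ing the machine. A small sketch of that fail-soft check with an illustrative ring layout; note how unsigned wrap-around makes a corrupted cons index show up as a huge prod - cons:

#include <stdio.h>
#include <errno.h>

#define OUT_SIZE 256u

struct ring { unsigned int cons, prod; char out[OUT_SIZE]; };

static int ring_space_ok(const struct ring *r)
{
	/* prod - cons may wrap; anything beyond the buffer size means
	 * the other side corrupted the indices: report, don't crash. */
	if (r->prod - r->cons > OUT_SIZE) {
		fprintf(stderr, "illegal ring page indices\n");
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct ring good = { .cons = 10, .prod = 20 };
	struct ring bad  = { .cons = 500, .prod = 20 };

	printf("good=%d bad=%d\n", ring_space_ok(&good), ring_space_ok(&bad));
	return 0;
}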


@@ -2702,6 +2702,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
static const struct acpi_device_id sbsa_uart_acpi_match[] = {
{ "ARMH0011", 0 },
{ "ARMHB000", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);


@@ -611,6 +611,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
u32 val;
int ret;
if (IS_ENABLED(CONFIG_CONSOLE_POLL))
return;
if (!dma->chan)
return;


@@ -1525,6 +1525,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = uart_port_check(state);
char *buf;
/*
* At this point, we stop accepting input. To do this, we
@@ -1546,8 +1547,18 @@ static void uart_tty_port_shutdown(struct tty_port *port)
*/
tty_port_set_suspended(port, 0);
-	uart_change_pm(state, UART_PM_STATE_OFF);
+	/*
+	 * Free the transmit buffer.
+	 */
+	spin_lock_irq(&uport->lock);
+	buf = state->xmit.buf;
+	state->xmit.buf = NULL;
+	spin_unlock_irq(&uport->lock);
+
+	if (buf)
+		free_page((unsigned long)buf);
+
+	uart_change_pm(state, UART_PM_STATE_OFF);
}
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)


@@ -4460,8 +4460,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (oldspeed == USB_SPEED_LOW)
delay = HUB_LONG_RESET_TIME;
-	mutex_lock(hcd->address0_mutex);
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
retval = hub_port_reset(hub, port1, udev, delay, false);
@@ -4748,7 +4746,6 @@ fail:
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
-	mutex_unlock(hcd->address0_mutex);
return retval;
}
@@ -4838,6 +4835,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
static int unreliable_port = -1;
bool retry_locked;
/* Disconnect any existing devices under this port */
if (udev) {
@@ -4893,7 +4891,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
unit_load = 100;
status = 0;
for (i = 0; i < SET_CONFIG_TRIES; i++) {
usb_lock_port(port_dev);
mutex_lock(hcd->address0_mutex);
retry_locked = true;
/* reallocate for each attempt, since references
* to the previous one can escape in various ways
@@ -4902,6 +4904,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
if (!udev) {
dev_err(&port_dev->dev,
"couldn't allocate usb_device\n");
mutex_unlock(hcd->address0_mutex);
usb_unlock_port(port_dev);
goto done;
}
@@ -4923,12 +4927,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
}
/* reset (non-USB 3.0 devices) and get descriptor */
-		usb_lock_port(port_dev);
 		status = hub_port_init(hub, udev, port1, i);
-		usb_unlock_port(port_dev);
 		if (status < 0)
 			goto loop;

+		mutex_unlock(hcd->address0_mutex);
+		usb_unlock_port(port_dev);
+		retry_locked = false;
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(2000);
@@ -5021,6 +5027,10 @@ loop:
usb_ep0_reinit(udev);
release_devnum(udev);
hub_free_dev(udev);
if (retry_locked) {
mutex_unlock(hcd->address0_mutex);
usb_unlock_port(port_dev);
}
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
@@ -5572,6 +5582,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
bos = udev->bos;
udev->bos = NULL;
mutex_lock(hcd->address0_mutex);
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
/* ep0 maxpacket size may change; let the HCD know about it.
@@ -5581,6 +5593,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
break;
}
mutex_unlock(hcd->address0_mutex);
if (ret < 0)
goto re_enumerate;


@@ -1243,6 +1243,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -2072,6 +2074,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */


@@ -406,7 +406,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
else
virtio_transport_free_pkt(pkt);
-		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
+		vhost_add_used(vq, head, 0);
added = true;
}


@@ -420,11 +420,17 @@ static void vgacon_init(struct vc_data *c, int init)
struct uni_pagedir *p;
/*
-	 * We cannot be loaded as a module, therefore init is always 1,
-	 * but vgacon_init can be called more than once, and init will
-	 * not be 1.
+	 * We cannot be loaded as a module, therefore init will be 1
+	 * if we are the default console, however if we are a fallback
+	 * console, for example if fbcon has failed registration, then
+	 * init will be 0, so we need to make sure our boot parameters
+	 * have been copied to the console structure for vgacon_resize
+	 * ultimately called by vc_resize. Any subsequent calls to
+	 * vgacon_init init will have init set to 0 too.
 	 */
 	c->vc_can_do_color = vga_can_do_color;
+	c->vc_scan_lines = vga_scan_lines;
+	c->vc_font.height = c->vc_cell_height = vga_video_font_height;
/* set dimensions manually if init != 0 since vc_resize() will fail */
if (init) {
@@ -433,8 +439,6 @@ static void vgacon_init(struct vc_data *c, int init)
} else
vc_resize(c, vga_video_num_columns, vga_video_num_lines);
-	c->vc_scan_lines = vga_scan_lines;
-	c->vc_font.height = c->vc_cell_height = vga_video_font_height;
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;


@@ -764,7 +764,7 @@ static struct notifier_block xenbus_resume_nb = {
static int __init xenbus_init(void)
{
-	int err = 0;
+	int err;
uint64_t v = 0;
xen_store_domain_type = XS_UNKNOWN;
@@ -804,6 +804,29 @@ static int __init xenbus_init(void)
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
if (err)
goto out_error;
/*
* Uninitialized hvm_params are zero and return no error.
* Although it is theoretically possible to have
* HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
* not zero when valid. If zero, it means that Xenstore hasn't
* been properly initialized. Instead of attempting to map a
* wrong guest physical address return error.
*
* Also recognize all bits set as an invalid value.
*/
if (!v || !~v) {
err = -ENOENT;
goto out_error;
}
/* Avoid truncation on 32-bit. */
#if BITS_PER_LONG == 32
if (v > ULONG_MAX) {
pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
__func__, v);
err = -EINVAL;
goto out_error;
}
#endif
xen_store_gfn = (unsigned long)v;
xen_store_interface =
xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
@@ -832,8 +855,10 @@ static int __init xenbus_init(void)
*/
proc_mkdir("xen", NULL);
#endif
return 0;
out_error:
xen_store_domain_type = XS_UNKNOWN;
return err;
}


@@ -692,7 +692,7 @@ void do_close_on_exec(struct files_struct *files)
spin_unlock(&files->file_lock);
}
-static struct file *__fget(unsigned int fd, fmode_t mask)
+static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
{
struct files_struct *files = current->files;
struct file *file;
@@ -707,23 +707,32 @@ loop:
*/
if (file->f_mode & mask)
file = NULL;
-		else if (!get_file_rcu(file))
+		else if (!get_file_rcu_many(file, refs))
 			goto loop;
+		else if (__fcheck_files(files, fd) != file) {
+			fput_many(file, refs);
+			goto loop;
+		}
}
rcu_read_unlock();
return file;
}
struct file *fget_many(unsigned int fd, unsigned int refs)
{
return __fget(fd, FMODE_PATH, refs);
}
struct file *fget(unsigned int fd)
{
-	return __fget(fd, FMODE_PATH);
+	return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);
struct file *fget_raw(unsigned int fd)
{
-	return __fget(fd, 0);
+	return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);
@@ -754,7 +763,7 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
return 0;
return (unsigned long)file;
} else {
-		file = __fget(fd, mask);
+		file = __fget(fd, mask, 1);
if (!file)
return 0;
return FDPUT_FPUT | (unsigned long)file;


@@ -261,9 +261,9 @@ void flush_delayed_fput(void)
static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
-void fput(struct file *file)
+void fput_many(struct file *file, unsigned int refs)
 {
-	if (atomic_long_dec_and_test(&file->f_count)) {
+	if (atomic_long_sub_and_test(refs, &file->f_count)) {
struct task_struct *task = current;
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
@@ -282,6 +282,11 @@ void fput(struct file *file)
}
}
void fput(struct file *file)
{
fput_many(file, 1);
}
/*
* synchronous analog of fput(); for kernel threads that might be needed
* in some umount() (and thus can't use flush_delayed_fput() without
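fget_many()/fput_many() above generalize the refcount helpers so a caller that holds several references can take or drop them in a single atomic operation. The same batched pattern in portable C11 atomics; this is a simplified userspace analogue, not the kernel API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_long count;
	/* payload ... */
};

/* Drop 'refs' references at once; free on the final one. */
static void obj_put_many(struct obj *o, long refs)
{
	if (atomic_fetch_sub(&o->count, refs) == refs) {
		printf("last reference gone, freeing\n");
		free(o);
	}
}

/* Take 'refs' references unless the object is already dead (count 0). */
static int obj_get_many(struct obj *o, long refs)
{
	long cur = atomic_load(&o->count);

	while (cur != 0)
		if (atomic_compare_exchange_weak(&o->count, &cur, cur + refs))
			return 1;
	return 0;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->count, 1);
	if (obj_get_many(o, 3))
		obj_put_many(o, 3);
	obj_put_many(o, 1);	/* drops the initial reference */
	return 0;
}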


@@ -907,6 +907,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (!(buf->flags & PIPE_BUF_FLAG_LRU))
lru_cache_add_file(newpage);
/*
* Release while we have extra ref on stolen page. Otherwise
* anon_pipe_buf_release() might think the page can be reused.
*/
pipe_buf_release(cs->pipe, buf);
err = 0;
spin_lock(&cs->req->waitq.lock);
if (test_bit(FR_ABORTED, &cs->req->flags))
@@ -2050,8 +2056,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
out_free:
-	for (idx = 0; idx < nbuf; idx++)
-		pipe_buf_release(pipe, &bufs[idx]);
+	for (idx = 0; idx < nbuf; idx++) {
+		struct pipe_buffer *buf = &bufs[idx];
+
+		if (buf->ops)
+			pipe_buf_release(pipe, buf);
+	}
pipe_unlock(pipe);
kfree(bufs);


@@ -181,8 +181,9 @@ static ssize_t _nfs42_proc_copy(struct file *src,
return status;
}
-	truncate_pagecache_range(dst_inode, pos_dst,
-				 pos_dst + res->write_res.count);
+	WARN_ON_ONCE(invalidate_inode_pages2_range(dst_inode->i_mapping,
+					pos_dst >> PAGE_SHIFT,
+					(pos_dst + res->write_res.count - 1) >> PAGE_SHIFT));
return res->write_res.count;
}


@@ -593,8 +593,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
status = decode_clone(xdr);
if (status)
goto out;
-	status = decode_getfattr(xdr, res->dst_fattr, res->server);
+	decode_getfattr(xdr, res->dst_fattr, res->server);
out:
res->rpc_status = status;
return status;


@@ -105,14 +105,19 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
nr_bytes = count;
/* If pfn is not ram, return zeros for sparse dump files */
-		if (pfn_is_ram(pfn) == 0)
-			memset(buf, 0, nr_bytes);
-		else {
+		if (pfn_is_ram(pfn) == 0) {
+			tmp = 0;
+			if (!userbuf)
+				memset(buf, 0, nr_bytes);
+			else if (clear_user(buf, nr_bytes))
+				tmp = -EFAULT;
+		} else {
 			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
 					       offset, userbuf);
-			if (tmp < 0)
-				return tmp;
 		}
+		if (tmp < 0)
+			return tmp;
*ppos += nr_bytes;
count -= nr_bytes;
buf += nr_bytes;
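The vmcore fix above distinguishes destinations: memset() is only valid on kernel memory, while a user-space buffer must be cleared through clear_user(), which can fail with -EFAULT. A userspace analogue of that branch, where clear_user is a mock that can be made to fail:

#include <stdio.h>
#include <string.h>

/* Mock of clear_user(): zeroing a user pointer may fault, so it
 * returns nonzero on failure instead of crashing. */
static int clear_user_mock(char *ubuf, size_t n, int simulate_fault)
{
	if (simulate_fault)
		return 1;
	memset(ubuf, 0, n);
	return 0;
}

static long zero_dest(char *buf, size_t n, int userbuf, int fault)
{
	long tmp = 0;

	if (!userbuf)
		memset(buf, 0, n);		/* kernel buffer: direct */
	else if (clear_user_mock(buf, n, fault))
		tmp = -14;			/* -EFAULT */
	return tmp;
}

int main(void)
{
	char buf[8] = "xxxxxxx";

	printf("kernel: %ld, user: %ld, faulting user: %ld\n",
	       zero_dest(buf, 8, 0, 0), zero_dest(buf, 8, 1, 0),
	       zero_dest(buf, 8, 1, 1));
	return 0;
}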


@@ -123,6 +123,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
unsigned long end);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
unsigned long size);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address)


@@ -12,6 +12,7 @@
struct file;
extern void fput(struct file *);
extern void fput_many(struct file *, unsigned int);
struct file_operations;
struct vfsmount;
@@ -40,6 +41,7 @@ static inline void fdput(struct fd fd)
}
extern struct file *fget(unsigned int fd);
extern struct file *fget_many(unsigned int fd, unsigned int refs);
extern struct file *fget_raw(unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
extern unsigned long __fdget_raw(unsigned int fd);


@@ -958,7 +958,9 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
-#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
+#define get_file_rcu_many(x, cnt)	\
+	atomic_long_add_unless(&(x)->f_count, (cnt), 0)
+#define get_file_rcu(x)	get_file_rcu_many((x), 1)
#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x) atomic_long_read(&(x)->f_count)


@@ -122,6 +122,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
return ns;
}
static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
{
if (ns) {
if (atomic_inc_not_zero(&ns->count))
return ns;
}
return NULL;
}
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
@@ -138,6 +148,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
return ns;
}
static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
{
return ns;
}
static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}


@@ -192,6 +192,8 @@ struct kretprobe {
raw_spinlock_t lock;
};
#define KRETPROBE_MAX_DATA_SIZE 4096
struct kretprobe_instance {
struct hlist_node hlist;
struct kretprobe *rp;


@@ -3209,7 +3209,7 @@ static inline int thread_group_empty(struct task_struct *p)
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
* ->cgroup.subsys[]. And ->vfork_done.
* ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),


@@ -19,9 +19,18 @@ struct shmid_kernel /* private to the kernel */
pid_t shm_lprid;
struct user_struct *mlock_user;
/* The task created the shm object. NULL if the task is dead. */
/*
* The task that created the shm object; guarded by
* task_lock(shp->shm_creator).
*/
struct task_struct *shm_creator;
struct list_head shm_clist; /* list by creator */
/*
* List by creator. task_lock(->shm_creator) required for read/write.
* If list_empty(), then the creator is dead already.
*/
struct list_head shm_clist;
struct ipc_namespace *ns;
};
/* shm_mode upper byte flags */


@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
}
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
#endif
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
return __siphash_unaligned(data, len, key);
#endif
return ___siphash_aligned(data, len, key);
}
@@ -96,10 +93,8 @@ typedef struct {
u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);
#endif
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
return __hsiphash_unaligned(data, len, key);
#endif
return ___hsiphash_aligned(data, len, key);
}
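
The header change above trades #ifdef blocks for IS_ENABLED(): both branches
are always parsed and type-checked, and the dead one is discarded because the
condition folds to a compile-time constant. A standalone illustration of the
idiom (all names are placeholders):

#include <stddef.h>

#define HAVE_EFFICIENT_UNALIGNED_ACCESS 0	/* stand-in for IS_ENABLED() */
#define HASH_ALIGNMENT sizeof(unsigned long)

static unsigned long hash_aligned(const void *d, size_t n)   { return n;     }
static unsigned long hash_unaligned(const void *d, size_t n) { return n + 1; }

static unsigned long hash(const void *data, size_t len)
{
	/* Folded at compile time: on architectures with fast unaligned
	 * loads every call goes to the unaligned variant, and the
	 * aligned one becomes dead code. */
	if (HAVE_EFFICIENT_UNALIGNED_ACCESS ||
	    ((unsigned long)data & (HASH_ALIGNMENT - 1)))
		return hash_unaligned(data, len);
	return hash_aligned(data, len);
}

This is also why lib/siphash.c (further down) builds only the _unaligned
variants when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.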


@@ -42,6 +42,7 @@ enum nci_flag {
NCI_UP,
NCI_DATA_EXCHANGE,
NCI_DATA_EXCHANGE_TO,
NCI_UNREG,
};
/* NCI device states */


@@ -19,6 +19,8 @@
*
*/
#include <linux/types.h>
#define NL802154_GENL_NAME "nl802154"
enum nl802154_commands {
@@ -150,10 +152,9 @@ enum nl802154_attrs {
};
enum nl802154_iftype {
/* for backwards compatibility TODO */
NL802154_IFTYPE_UNSPEC = -1,
NL802154_IFTYPE_UNSPEC = (~(__u32)0),
NL802154_IFTYPE_NODE,
NL802154_IFTYPE_NODE = 0,
NL802154_IFTYPE_MONITOR,
NL802154_IFTYPE_COORD,


@@ -24,82 +24,79 @@ typedef unsigned int RING_IDX;
* A ring contains as many entries as will fit, rounded down to the nearest
* power of two (so we can mask with (size-1) to loop around).
*/
#define __CONST_RING_SIZE(_s, _sz) \
(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
sizeof(((struct _s##_sring *)0)->ring[0])))
#define __CONST_RING_SIZE(_s, _sz) \
(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
sizeof(((struct _s##_sring *)0)->ring[0])))
/*
* The same for passing in an actual pointer instead of a name tag.
*/
#define __RING_SIZE(_s, _sz) \
(__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
#define __RING_SIZE(_s, _sz) \
(__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
/*
* Macros to make the correct C datatypes for a new kind of ring.
*
* To make a new ring datatype, you need to have two message structures,
* let's say struct request, and struct response already defined.
* let's say request_t, and response_t already defined.
*
* In a header where you want the ring datatype declared, you then do:
*
* DEFINE_RING_TYPES(mytag, struct request, struct response);
* DEFINE_RING_TYPES(mytag, request_t, response_t);
*
* These expand out to give you a set of types, as you can see below.
* The most important of these are:
*
* struct mytag_sring - The shared ring.
* struct mytag_front_ring - The 'front' half of the ring.
* struct mytag_back_ring - The 'back' half of the ring.
* mytag_sring_t - The shared ring.
* mytag_front_ring_t - The 'front' half of the ring.
* mytag_back_ring_t - The 'back' half of the ring.
*
* To initialize a ring in your code you need to know the location and size
* of the shared memory area (PAGE_SIZE, for instance). To initialise
* the front half:
*
* struct mytag_front_ring front_ring;
* SHARED_RING_INIT((struct mytag_sring *)shared_page);
* FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
* PAGE_SIZE);
* mytag_front_ring_t front_ring;
* SHARED_RING_INIT((mytag_sring_t *)shared_page);
* FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*
* Initializing the back follows similarly (note that only the front
* initializes the shared ring):
*
* struct mytag_back_ring back_ring;
* BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
* PAGE_SIZE);
* mytag_back_ring_t back_ring;
* BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
*/
#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
\
/* Shared ring entry */ \
union __name##_sring_entry { \
__req_t req; \
__rsp_t rsp; \
}; \
\
/* Shared ring page */ \
struct __name##_sring { \
RING_IDX req_prod, req_event; \
RING_IDX rsp_prod, rsp_event; \
uint8_t pad[48]; \
union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
\
/* "Front" end's private variables */ \
struct __name##_front_ring { \
RING_IDX req_prod_pvt; \
RING_IDX rsp_cons; \
unsigned int nr_ents; \
struct __name##_sring *sring; \
}; \
\
/* "Back" end's private variables */ \
struct __name##_back_ring { \
RING_IDX rsp_prod_pvt; \
RING_IDX req_cons; \
unsigned int nr_ents; \
struct __name##_sring *sring; \
};
#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
\
/* Shared ring entry */ \
union __name##_sring_entry { \
__req_t req; \
__rsp_t rsp; \
}; \
\
/* Shared ring page */ \
struct __name##_sring { \
RING_IDX req_prod, req_event; \
RING_IDX rsp_prod, rsp_event; \
uint8_t __pad[48]; \
union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
\
/* "Front" end's private variables */ \
struct __name##_front_ring { \
RING_IDX req_prod_pvt; \
RING_IDX rsp_cons; \
unsigned int nr_ents; \
struct __name##_sring *sring; \
}; \
\
/* "Back" end's private variables */ \
struct __name##_back_ring { \
RING_IDX rsp_prod_pvt; \
RING_IDX req_cons; \
unsigned int nr_ents; \
struct __name##_sring *sring; \
}; \
\
/*
* Macros for manipulating rings.
*
@@ -116,105 +113,99 @@ struct __name##_back_ring { \
*/
/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do { \
(_s)->req_prod = (_s)->rsp_prod = 0; \
(_s)->req_event = (_s)->rsp_event = 1; \
memset((_s)->pad, 0, sizeof((_s)->pad)); \
#define SHARED_RING_INIT(_s) do { \
(_s)->req_prod = (_s)->rsp_prod = 0; \
(_s)->req_event = (_s)->rsp_event = 1; \
(void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)
#define FRONT_RING_INIT(_r, _s, __size) do { \
(_r)->req_prod_pvt = 0; \
(_r)->rsp_cons = 0; \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
(_r)->req_prod_pvt = (_i); \
(_r)->rsp_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) do { \
(_r)->rsp_prod_pvt = 0; \
(_r)->req_cons = 0; \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
(_r)->rsp_prod_pvt = (_i); \
(_r)->req_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
/* Initialize to existing shared indexes -- for recovery */
#define FRONT_RING_ATTACH(_r, _s, __size) do { \
(_r)->sring = (_s); \
(_r)->req_prod_pvt = (_s)->req_prod; \
(_r)->rsp_cons = (_s)->rsp_prod; \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
} while (0)
#define BACK_RING_ATTACH(_r, _s, __size) do { \
(_r)->sring = (_s); \
(_r)->rsp_prod_pvt = (_s)->rsp_prod; \
(_r)->req_cons = (_s)->req_prod; \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
/* How big is this ring? */
#define RING_SIZE(_r) \
#define RING_SIZE(_r) \
((_r)->nr_ents)
/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r) \
#define RING_FREE_REQUESTS(_r) \
(RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
/* Test if there is an empty slot available on the front ring.
* (This is only meaningful from the front.)
*/
#define RING_FULL(_r) \
#define RING_FULL(_r) \
(RING_FREE_REQUESTS(_r) == 0)
/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
((_r)->sring->rsp_prod - (_r)->rsp_cons)
#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
({ \
unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
unsigned int rsp = RING_SIZE(_r) - \
((_r)->req_cons - (_r)->rsp_prod_pvt); \
req < rsp ? req : rsp; \
})
#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
unsigned int rsp = RING_SIZE(_r) - \
((_r)->req_cons - (_r)->rsp_prod_pvt); \
req < rsp ? req : rsp; \
})
/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx) \
#define RING_GET_REQUEST(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
#define RING_GET_RESPONSE(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
/*
* Get a local copy of a request.
* Get a local copy of a request/response.
*
* Use this in preference to RING_GET_REQUEST() so all processing is
* Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
* done on a local copy that cannot be modified by the other end.
*
* Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
* to be ineffective where _req is a struct which consists of only bitfields.
* to be ineffective where dest is a struct which consists of only bitfields.
*/
#define RING_COPY_REQUEST(_r, _idx, _req) do { \
/* Use volatile to force the copy into _req. */ \
*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
#define RING_COPY_(type, r, idx, dest) do { \
/* Use volatile to force the copy into dest. */ \
*(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
} while (0)
#define RING_GET_RESPONSE(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
/* Ill-behaved backend determination: Can there be this many responses? */
#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
(((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
#define RING_PUSH_REQUESTS(_r) do { \
virt_wmb(); /* back sees requests /before/ updated producer index */ \
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
#define RING_PUSH_REQUESTS(_r) do { \
virt_wmb(); /* back sees requests /before/ updated producer index */\
(_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)
#define RING_PUSH_RESPONSES(_r) do { \
virt_wmb(); /* front sees responses /before/ updated producer index */ \
(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
#define RING_PUSH_RESPONSES(_r) do { \
virt_wmb(); /* front sees resps /before/ updated producer index */ \
(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)
/*
@@ -247,40 +238,40 @@ struct __name##_back_ring { \
* field appropriately.
*/
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
RING_IDX __old = (_r)->sring->req_prod; \
RING_IDX __new = (_r)->req_prod_pvt; \
virt_wmb(); /* back sees requests /before/ updated producer index */ \
(_r)->sring->req_prod = __new; \
virt_mb(); /* back sees new requests /before/ we check req_event */ \
(_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
(RING_IDX)(__new - __old)); \
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
RING_IDX __old = (_r)->sring->req_prod; \
RING_IDX __new = (_r)->req_prod_pvt; \
virt_wmb(); /* back sees requests /before/ updated producer index */\
(_r)->sring->req_prod = __new; \
virt_mb(); /* back sees new requests /before/ we check req_event */ \
(_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
(RING_IDX)(__new - __old)); \
} while (0)
#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
RING_IDX __old = (_r)->sring->rsp_prod; \
RING_IDX __new = (_r)->rsp_prod_pvt; \
virt_wmb(); /* front sees responses /before/ updated producer index */ \
(_r)->sring->rsp_prod = __new; \
virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
(_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
(RING_IDX)(__new - __old)); \
#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
RING_IDX __old = (_r)->sring->rsp_prod; \
RING_IDX __new = (_r)->rsp_prod_pvt; \
virt_wmb(); /* front sees resps /before/ updated producer index */ \
(_r)->sring->rsp_prod = __new; \
virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
(_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
(RING_IDX)(__new - __old)); \
} while (0)
#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
if (_work_to_do) break; \
(_r)->sring->req_event = (_r)->req_cons + 1; \
virt_mb(); \
(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
if (_work_to_do) break; \
(_r)->sring->req_event = (_r)->req_cons + 1; \
virt_mb(); \
(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
} while (0)
#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
if (_work_to_do) break; \
(_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
virt_mb(); \
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
if (_work_to_do) break; \
(_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
virt_mb(); \
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
#endif /* __XEN_PUBLIC_IO_RING_H__ */
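
RING_COPY_RESPONSE() exists so a frontend reads each response from the shared
page exactly once: re-reading through RING_GET_RESPONSE() would let a
malicious backend change fields between the first read and the check. A
hedged usage sketch (the mytag ring, process_response() and irq are
hypothetical):

struct mytag_response rsp;
int notify;
RING_IDX cons = front_ring.rsp_cons;

/* One volatile copy out of the shared page ... */
RING_COPY_RESPONSE(&front_ring, cons, &rsp);
front_ring.rsp_cons = ++cons;

/* ... and from here on only the private copy is examined. */
process_response(&rsp);

/* Producer side: publish requests and kick the event channel only if
 * the other end asked to be notified (req_event). */
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
if (notify)
	notify_remote_via_irq(irq);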

ipc/shm.c

@@ -90,6 +90,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
struct shmid_kernel *shp;
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
WARN_ON(ns != shp->ns);
if (shp->shm_nattch) {
shp->shm_perm.mode |= SHM_DEST;
@@ -180,10 +181,43 @@ static void shm_rcu_free(struct rcu_head *head)
ipc_rcu_free(head);
}
static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
/*
* It has to be called with shp locked.
* It must be called before ipc_rmid()
*/
static inline void shm_clist_rm(struct shmid_kernel *shp)
{
list_del(&s->shm_clist);
ipc_rmid(&shm_ids(ns), &s->shm_perm);
struct task_struct *creator;
/* ensure that shm_creator does not disappear */
rcu_read_lock();
/*
* A concurrent exit_shm may do a list_del_init() as well.
* Just do nothing if exit_shm already did the work.
*/
if (!list_empty(&shp->shm_clist)) {
/*
* shp->shm_creator is guaranteed to be valid *only*
* if shp->shm_clist is not empty.
*/
creator = shp->shm_creator;
task_lock(creator);
/*
* list_del_init() is a nop if the entry was already removed
* from the list.
*/
list_del_init(&shp->shm_clist);
task_unlock(creator);
}
rcu_read_unlock();
}
static inline void shm_rmid(struct shmid_kernel *s)
{
shm_clist_rm(s);
ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}
@@ -238,7 +272,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
shm_file = shp->shm_file;
shp->shm_file = NULL;
ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
shm_rmid(ns, shp);
shm_rmid(shp);
shm_unlock(shp);
if (!is_file_hugepages(shm_file))
shmem_lock(shm_file, 0, shp->mlock_user);
@@ -259,10 +293,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
*
* 2) sysctl kernel.shm_rmid_forced is set to 1.
*/
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
static bool shm_may_destroy(struct shmid_kernel *shp)
{
return (shp->shm_nattch == 0) &&
(ns->shm_rmid_forced ||
(shp->ns->shm_rmid_forced ||
(shp->shm_perm.mode & SHM_DEST));
}
@@ -293,7 +327,7 @@ static void shm_close(struct vm_area_struct *vma)
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
if (shm_may_destroy(ns, shp))
if (shm_may_destroy(shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);
@@ -314,10 +348,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
*
* As shp->* are changed under rwsem, it's safe to skip shp locking.
*/
if (shp->shm_creator != NULL)
if (!list_empty(&shp->shm_clist))
return 0;
if (shm_may_destroy(ns, shp)) {
if (shm_may_destroy(shp)) {
shm_lock_by_ptr(shp);
shm_destroy(ns, shp);
}
@@ -335,48 +369,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
struct ipc_namespace *ns = task->nsproxy->ipc_ns;
struct shmid_kernel *shp, *n;
for (;;) {
struct shmid_kernel *shp;
struct ipc_namespace *ns;
if (list_empty(&task->sysvshm.shm_clist))
return;
task_lock(task);
/*
* If kernel.shm_rmid_forced is not set then only keep track of
* which shmids are orphaned, so that a later set of the sysctl
* can clean them up.
*/
if (!ns->shm_rmid_forced) {
down_read(&shm_ids(ns).rwsem);
list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
shp->shm_creator = NULL;
/*
* Only under read lock but we are only called on current
* so no entry on the list will be shared.
*/
list_del(&task->sysvshm.shm_clist);
up_read(&shm_ids(ns).rwsem);
return;
}
/*
* Destroy all already created segments, that were not yet mapped,
* and mark any mapped as orphan to cover the sysctl toggling.
* Destroy is skipped if shm_may_destroy() returns false.
*/
down_write(&shm_ids(ns).rwsem);
list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
shp->shm_creator = NULL;
if (shm_may_destroy(ns, shp)) {
shm_lock_by_ptr(shp);
shm_destroy(ns, shp);
if (list_empty(&task->sysvshm.shm_clist)) {
task_unlock(task);
break;
}
}
/* Remove the list head from any segments still attached. */
list_del(&task->sysvshm.shm_clist);
up_write(&shm_ids(ns).rwsem);
shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
shm_clist);
/*
* 1) Get a pointer to the ipc namespace. This pointer is
* guaranteed to be valid because the lifetime of shp is
* always shorter than that of the namespace shp lives in.
* Since we hold task_lock(), shp cannot be freed under us.
*/
ns = shp->ns;
/*
* 2) If kernel.shm_rmid_forced is not set then only keep track of
* which shmids are orphaned, so that a later set of the sysctl
* can clean them up.
*/
if (!ns->shm_rmid_forced)
goto unlink_continue;
/*
* 3) Get a reference to the namespace.
* The refcount could already be 0; if it is, the shm
* objects will be freed by free_ipc_work().
*/
ns = get_ipc_ns_not_zero(ns);
if (!ns) {
unlink_continue:
list_del_init(&shp->shm_clist);
task_unlock(task);
continue;
}
/*
* 4) get a reference to shp.
* This cannot fail: shm_clist_rm() is called before
* ipc_rmid(), thus the refcount cannot be 0.
*/
WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
/*
* 5) unlink the shm segment from the list of segments
* created by current.
* This must be done last. After unlinking,
* only the refcounts obtained above prevent IPC_RMID
* from destroying the segment or the namespace.
*/
list_del_init(&shp->shm_clist);
task_unlock(task);
/*
* 6) We now hold all the references we need,
* so lock shp and, if needed, destroy it.
*/
down_write(&shm_ids(ns).rwsem);
shm_lock_by_ptr(shp);
/*
* rcu_read_lock() was implicitly taken in shm_lock_by_ptr(), so
* it's safe to call ipc_rcu_putref() here.
*/
ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
if (ipc_valid_object(&shp->shm_perm)) {
if (shm_may_destroy(shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);
} else {
/*
* Someone else deleted shp from the namespace
* idr/kht while we were waiting.
* Just unlock and continue.
*/
shm_unlock(shp);
}
up_write(&shm_ids(ns).rwsem);
put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
}
}
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -621,7 +704,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
goto no_id;
}
shp->ns = ns;
task_lock(current);
list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
task_unlock(current);
/*
* shmid gets reported as "inode#" in /proc/pid/maps.
@@ -1270,7 +1357,8 @@ out_nattch:
down_write(&shm_ids(ns).rwsem);
shp = shm_lock(ns, shmid);
shp->shm_nattch--;
if (shm_may_destroy(ns, shp))
if (shm_may_destroy(shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);


@@ -1899,6 +1899,9 @@ int register_kretprobe(struct kretprobe *rp)
}
}
if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
return -E2BIG;
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
rp->kp.fault_handler = NULL;
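
data_size is the per-instance scratch area a handler reaches through
ri->data, and an instance is allocated for every concurrent hit, which is why
it is now capped at KRETPROBE_MAX_DATA_SIZE (4096). A hedged registration
sketch against the 4.9-era API (the probed symbol is a placeholder):

#include <linux/kprobes.h>

struct my_data {
	u64 entry_stamp;	/* per-instance scratch */
};

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct my_data *d = (struct my_data *)ri->data;

	/* ... inspect d->entry_stamp, regs, etc. ... */
	(void)d;
	return 0;
}

static struct kretprobe my_rp = {
	.kp.symbol_name	= "do_fork",	/* placeholder target */
	.handler	= my_ret_handler,
	.data_size	= sizeof(struct my_data),
	.maxactive	= 16,
};

/* register_kretprobe(&my_rp) now returns -E2BIG if data_size > 4096. */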


@@ -672,7 +672,7 @@ static int load_image_and_restore(void)
goto Unlock;
error = swsusp_read(&flags);
swsusp_close(FMODE_READ);
swsusp_close(FMODE_READ | FMODE_EXCL);
if (!error)
hibernation_restore(flags & SF_PLATFORM_MODE);
@@ -866,7 +866,7 @@ static int software_resume(void)
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
swsusp_close(FMODE_READ);
swsusp_close(FMODE_READ | FMODE_EXCL);
goto Unlock;
}
@@ -900,7 +900,7 @@ static int software_resume(void)
pr_debug("PM: Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
swsusp_close(FMODE_READ);
swsusp_close(FMODE_READ | FMODE_EXCL);
goto Finish;
}


@@ -1163,14 +1163,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
if (eflags & EVENT_FILE_FL_TRIGGER_COND)
*tt = event_triggers_call(file, entry);
if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
(unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
!filter_match_preds(file->filter, entry))) {
__trace_event_discard_commit(buffer, event);
return true;
}
if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
EVENT_FILE_FL_FILTERED |
EVENT_FILE_FL_PID_FILTER))))
return false;
if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
goto discard;
if (file->flags & EVENT_FILE_FL_FILTERED &&
!filter_match_preds(file->filter, entry))
goto discard;
if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
trace_event_ignore_this_pid(file))
goto discard;
return false;
discard:
__trace_event_discard_commit(buffer, event);
return true;
}
/**


@@ -2241,12 +2241,19 @@ static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
struct trace_array *tr)
{
struct trace_pid_list *pid_list;
struct trace_event_file *file;
file = kmem_cache_alloc(file_cachep, GFP_TRACE);
if (!file)
return NULL;
pid_list = rcu_dereference_protected(tr->filtered_pids,
lockdep_is_held(&event_mutex));
if (pid_list)
file->flags |= EVENT_FILE_FL_PID_FILTER;
file->event_call = call;
file->tr = tr;
atomic_set(&file->sm_ref, 0);


@@ -49,6 +49,7 @@
SIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_aligned);
#endif
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_unaligned);
#endif
/**
* siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
HSIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
#endif
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
#endif
/**
* hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
HSIPROUND; \
return v1 ^ v3;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u32));
@@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
#endif
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
#endif
/**
* hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32


@@ -3393,14 +3393,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct page *page;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
const unsigned long mmun_start = start; /* For mmu_notifiers */
const unsigned long mmun_end = end; /* For mmu_notifiers */
unsigned long mmun_start = start; /* For mmu_notifiers */
unsigned long mmun_end = end; /* For mmu_notifiers */
bool force_flush = false;
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
BUG_ON(end & ~huge_page_mask(h));
tlb_start_vma(tlb, vma);
/*
* If sharing is possible, alert mmu notifiers of the worst case.
*/
adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
address = start;
for (; address < end; address += sz) {
@@ -3411,6 +3417,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, &address, ptep)) {
spin_unlock(ptl);
tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
force_flush = true;
continue;
}
@@ -3467,6 +3475,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
}
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
tlb_end_vma(tlb, vma);
/*
* If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
* could defer the flush until now, since by holding i_mmap_rwsem we
guaranteed that the last reference would not be dropped. But we must
* do the flushing before we return, as otherwise i_mmap_rwsem will be
* dropped and the last reference to the shared PMDs page might be
* dropped as well.
*
* In theory we could defer the freeing of the PMD pages as well, but
* huge_pmd_unshare() relies on the exact page_count for the PMD page to
* detect sharing, so we cannot defer the release of the page either.
* Instead, do flush now.
*/
if (force_flush)
tlb_flush_mmu(tlb);
}
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
@@ -3493,12 +3517,23 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
{
struct mm_struct *mm;
struct mmu_gather tlb;
unsigned long tlb_start = start;
unsigned long tlb_end = end;
/*
* If shared PMDs were possibly used within this vma range, adjust
* start/end for worst case tlb flushing.
* Note that we cannot be sure if PMDs are shared until we try to
* unmap pages. However, we want to make sure TLB flushing covers
* the largest possible range.
*/
adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
mm = vma->vm_mm;
tlb_gather_mmu(&tlb, mm, start, end);
tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
tlb_finish_mmu(&tlb, start, end);
tlb_finish_mmu(&tlb, tlb_start, tlb_end);
}
/*
@@ -4186,11 +4221,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
pte_t pte;
struct hstate *h = hstate_vma(vma);
unsigned long pages = 0;
unsigned long f_start = start;
unsigned long f_end = end;
bool shared_pmd = false;
/*
* In the case of shared PMDs, the area to flush could be beyond
* start/end. Set f_start/f_end to cover the maximum possible
* range if PMD sharing is possible.
*/
adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
BUG_ON(address >= end);
flush_cache_range(vma, address, end);
flush_cache_range(vma, f_start, f_end);
mmu_notifier_invalidate_range_start(mm, start, end);
mmu_notifier_invalidate_range_start(mm, f_start, f_end);
i_mmap_lock_write(vma->vm_file->f_mapping);
for (; address < end; address += huge_page_size(h)) {
spinlock_t *ptl;
@@ -4201,6 +4246,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
if (huge_pmd_unshare(mm, &address, ptep)) {
pages++;
spin_unlock(ptl);
shared_pmd = true;
continue;
}
pte = huge_ptep_get(ptep);
@@ -4235,12 +4281,18 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
* may have cleared our pud entry and done put_page on the page table:
* once we release i_mmap_rwsem, another task can do the final put_page
* and that page table be reused and filled with junk.
* and that page table be reused and filled with junk. If we actually
* did unshare a page of pmds, flush the range corresponding to the pud.
*/
flush_hugetlb_tlb_range(vma, start, end);
mmu_notifier_invalidate_range(mm, start, end);
if (shared_pmd) {
flush_hugetlb_tlb_range(vma, f_start, f_end);
mmu_notifier_invalidate_range(mm, f_start, f_end);
} else {
flush_hugetlb_tlb_range(vma, start, end);
mmu_notifier_invalidate_range(mm, start, end);
}
i_mmap_unlock_write(vma->vm_file->f_mapping);
mmu_notifier_invalidate_range_end(mm, start, end);
mmu_notifier_invalidate_range_end(mm, f_start, f_end);
return pages << h->order;
}
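
All of the call sites above widen their ranges through
adjust_range_if_pmd_sharing_possible(). Its essential effect, as a
simplified sketch (the real helper first checks whether the VMA can share
PMDs at all), is to round the span out to PUD-sized boundaries, because a
shared PMD page maps an entire PUD_SIZE region:

/* Simplified sketch: a shared PMD page covers a whole PUD_SIZE region,
 * so any flush that might touch a shared PMD must cover all of it. */
static void widen_for_pmd_sharing(unsigned long *start, unsigned long *end)
{
	*start &= PUD_MASK;				/* round down */
	*end = (*end + PUD_SIZE - 1) & PUD_MASK;	/* round up */
}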


@@ -320,6 +320,22 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
return false;
}
void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
unsigned long size)
{
if (tlb->page_size != 0 && tlb->page_size != PMD_SIZE)
tlb_flush_mmu(tlb);
tlb->page_size = PMD_SIZE;
tlb->start = min(tlb->start, address);
tlb->end = max(tlb->end, address + size);
/*
* Track the last address with which we adjusted the range. This
* will be used later to adjust the range again after an mmu_flush
* caused by a failed __tlb_remove_page().
*/
tlb->addr = address + size - PMD_SIZE;
}
#endif /* HAVE_GENERIC_MMU_GATHER */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE


@@ -2271,7 +2271,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
free:
kfree(t);
out:
return -ENOBUFS;
return -ENOMEM;
}
static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)


@@ -342,8 +342,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
if (tcp_in_slow_start(tp)) {
if (hystart && after(ack, ca->end_seq))
bictcp_hystart_reset(sk);
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
@@ -394,6 +392,9 @@ static void hystart_update(struct sock *sk, u32 delay)
if (ca->found & hystart_detect)
return;
if (after(tp->snd_una, ca->end_seq))
bictcp_hystart_reset(sk);
if (hystart_detect & HYSTART_ACK_TRAIN) {
u32 now = bictcp_clock();


@@ -486,6 +486,11 @@ static int nci_open_device(struct nci_dev *ndev)
mutex_lock(&ndev->req_lock);
if (test_bit(NCI_UNREG, &ndev->flags)) {
rc = -ENODEV;
goto done;
}
if (test_bit(NCI_UP, &ndev->flags)) {
rc = -EALREADY;
goto done;
@@ -549,6 +554,10 @@ done:
static int nci_close_device(struct nci_dev *ndev)
{
nci_req_cancel(ndev, ENODEV);
/* This mutex needs to be held as a barrier for the
* caller, nci_unregister_device().
*/
mutex_lock(&ndev->req_lock);
if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
@@ -586,8 +595,8 @@ static int nci_close_device(struct nci_dev *ndev)
/* Flush cmd wq */
flush_workqueue(ndev->cmd_wq);
/* Clear flags */
ndev->flags = 0;
/* Clear flags except NCI_UNREG */
ndev->flags &= BIT(NCI_UNREG);
mutex_unlock(&ndev->req_lock);
@@ -1271,6 +1280,12 @@ void nci_unregister_device(struct nci_dev *ndev)
{
struct nci_conn_info *conn_info, *n;
/* This set_bit is not protected by a specialized barrier;
* however, this is fine because the mutex_lock(&ndev->req_lock)
* in nci_close_device() will emit one.
*/
set_bit(NCI_UNREG, &ndev->flags);
nci_close_device(ndev);
destroy_workqueue(ndev->cmd_wq);


@@ -389,7 +389,7 @@ void rds_tcp_tune(struct socket *sock)
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
}
if (rtn->rcvbuf_size > 0) {
sk->sk_sndbuf = rtn->rcvbuf_size;
sk->sk_rcvbuf = rtn->rcvbuf_size;
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
}
release_sock(sk);


@@ -27,16 +27,15 @@
#define BLANK_SLOT 4094
static int amixer_master(struct rsc *rsc)
static void amixer_master(struct rsc *rsc)
{
rsc->conj = 0;
return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
}
static int amixer_next_conj(struct rsc *rsc)
static void amixer_next_conj(struct rsc *rsc)
{
rsc->conj++;
return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
}
static int amixer_index(const struct rsc *rsc)
@@ -335,16 +334,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
/* SUM resource management */
static int sum_master(struct rsc *rsc)
static void sum_master(struct rsc *rsc)
{
rsc->conj = 0;
return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
}
static int sum_next_conj(struct rsc *rsc)
static void sum_next_conj(struct rsc *rsc)
{
rsc->conj++;
return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
}
static int sum_index(const struct rsc *rsc)


@@ -55,12 +55,12 @@ static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
[SPDIFIO] = {.left = 0x05, .right = 0x85},
};
static int daio_master(struct rsc *rsc)
static void daio_master(struct rsc *rsc)
{
/* Actually, this is not the resource index of DAIO.
* For DAO, it is the input mapper index. And, for DAI,
* it is the output time-slot index. */
return rsc->conj = rsc->idx;
rsc->conj = rsc->idx;
}
static int daio_index(const struct rsc *rsc)
@@ -68,19 +68,19 @@ static int daio_index(const struct rsc *rsc)
return rsc->conj;
}
static int daio_out_next_conj(struct rsc *rsc)
static void daio_out_next_conj(struct rsc *rsc)
{
return rsc->conj += 2;
rsc->conj += 2;
}
static int daio_in_next_conj_20k1(struct rsc *rsc)
static void daio_in_next_conj_20k1(struct rsc *rsc)
{
return rsc->conj += 0x200;
rsc->conj += 0x200;
}
static int daio_in_next_conj_20k2(struct rsc *rsc)
static void daio_in_next_conj_20k2(struct rsc *rsc)
{
return rsc->conj += 0x100;
rsc->conj += 0x100;
}
static const struct rsc_ops daio_out_rsc_ops = {


@@ -113,18 +113,17 @@ static int audio_ring_slot(const struct rsc *rsc)
return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
}
static int rsc_next_conj(struct rsc *rsc)
static void rsc_next_conj(struct rsc *rsc)
{
unsigned int i;
for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
i++;
rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
return rsc->conj;
}
static int rsc_master(struct rsc *rsc)
static void rsc_master(struct rsc *rsc)
{
return rsc->conj = rsc->idx;
rsc->conj = rsc->idx;
}
static const struct rsc_ops rsc_generic_ops = {


@@ -43,8 +43,8 @@ struct rsc {
};
struct rsc_ops {
int (*master)(struct rsc *rsc); /* Move to master resource */
int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
void (*master)(struct rsc *rsc); /* Move to master resource */
void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
int (*index)(const struct rsc *rsc); /* Return the index of resource */
/* Return the output slot number */
int (*output_slot)(const struct rsc *rsc);


@@ -594,16 +594,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
/* SRCIMP resource manager operations */
static int srcimp_master(struct rsc *rsc)
static void srcimp_master(struct rsc *rsc)
{
rsc->conj = 0;
return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
}
static int srcimp_next_conj(struct rsc *rsc)
static void srcimp_next_conj(struct rsc *rsc)
{
rsc->conj++;
return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
}
static int srcimp_index(const struct rsc *rsc)


@@ -2050,6 +2050,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
/* remove dynamic controls from the component driver */
int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
{
struct snd_card *card = comp->card->snd_card;
struct snd_soc_dobj *dobj, *next_dobj;
int pass = SOC_TPLG_PASS_END;
@@ -2057,6 +2058,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
while (pass >= SOC_TPLG_PASS_START) {
/* remove mixer controls */
down_write(&card->controls_rwsem);
list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
list) {
@@ -2090,6 +2092,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
break;
}
}
up_write(&card->controls_rwsem);
pass--;
}