diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index f007cb671a47..cd58dd469ae7 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -229,7 +229,9 @@ Date: August 2017 Contact: "Jaegeuk Kim" Description: Do background GC agressively when set. When gc_urgent = 1, background thread starts to do GC by given gc_urgent_sleep_time - interval. It is set to 0 by default. + interval. When gc_urgent = 2, F2FS will lower the bar of + checking idle in order to process outstanding discard commands + and GC a little bit aggressively. It is set to 0 by default. What: /sys/fs/f2fs//gc_urgent_sleep_time Date: August 2017 diff --git a/Makefile b/Makefile index d990f9032319..fb6f3f7513e9 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 14 -SUBLEVEL = 186 +SUBLEVEL = 187 EXTRAVERSION = NAME = Petit Gorille diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index 1792192001a2..e975f9cabe84 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -249,10 +249,10 @@ status = "disabled"; }; - mailbox: mailbox@25000 { + mailbox: mailbox@25c00 { compatible = "brcm,iproc-fa2-mbox"; - reg = <0x25000 0x445>; - interrupts = ; + reg = <0x25c00 0x400>; + interrupts = ; #mbox-cells = <1>; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c index 868781fd460c..14c630c899c5 100644 --- a/arch/arm/mach-imx/pm-imx5.c +++ b/arch/arm/mach-imx/pm-imx5.c @@ -301,14 +301,14 @@ static int __init imx_suspend_alloc_ocram( if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, size); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } phys = 
gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -318,6 +318,8 @@ static int __init imx_suspend_alloc_ocram( if (virt_out) *virt_out = virt; +put_device: + put_device(&pdev->dev); put_node: of_node_put(node); diff --git a/arch/arm64/boot/dts/qcom/qcs410-iot.dts b/arch/arm64/boot/dts/qcom/qcs410-iot.dts index bac1561fdc2a..2fe709638dbc 100644 --- a/arch/arm64/boot/dts/qcom/qcs410-iot.dts +++ b/arch/arm64/boot/dts/qcom/qcs410-iot.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,9 +14,14 @@ #include "qcs410.dtsi" #include "qcs410-iot.dtsi" +#include "sm6150-audio-overlay.dtsi" / { model = "Qualcomm Technologies, Inc. QCS410 IOT"; compatible = "qcom,qcs410-iot", "qcom,qcs410", "qcom,iot"; qcom,board-id = <32 0>; }; + +&sm6150_snd { + /delete-property/ fsa4480-i2c-handle; +}; diff --git a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig index ace282a41b0b..bcd5327c39b1 100644 --- a/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe-perf_defconfig @@ -80,6 +80,7 @@ CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y CONFIG_KRYO_PMU_WORKAROUND=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 # CONFIG_PM_WAKELOCKS_GC is not set diff --git a/arch/arm64/configs/vendor/sdmsteppe_defconfig b/arch/arm64/configs/vendor/sdmsteppe_defconfig index d796e2fe093e..462022d262ac 100644 --- a/arch/arm64/configs/vendor/sdmsteppe_defconfig +++ b/arch/arm64/configs/vendor/sdmsteppe_defconfig @@ -85,6 +85,7 @@ CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y CONFIG_KRYO_PMU_WORKAROUND=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y 
+CONFIG_PM_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 # CONFIG_PM_WAKELOCKS_GC is not set diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 1d091d048d04..0819db91ca94 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return 0; /* - * Compat (i.e. 32 bit) mode: - * - PC has been set in the pt_regs struct in kernel_entry, - * - Handle SP and LR here. + * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but + * we're stuck with it for ABI compatability reasons. + * + * For a 32-bit consumer inspecting a 32-bit task, then it will look at + * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h). + * These correspond directly to a prefix of the registers saved in our + * 'struct pt_regs', with the exception of the PC, so we copy that down + * (x15 corresponds to SP_hyp in the architecture). + * + * So far, so good. + * + * The oddity arises when a 64-bit consumer looks at a 32-bit task and + * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return + * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and + * PC registers would normally live. The initial idea was to allow a + * 64-bit unwinder to unwind a 32-bit task and, although it's not clear + * how well that works in practice, somebody might be relying on it. + * + * At the time we make a sample, we don't know whether the consumer is + * 32-bit or 64-bit, so we have to cater for both possibilities. 
*/ if (compat_user_mode(regs)) { if ((u32)idx == PERF_REG_ARM64_SP) return regs->compat_sp; if ((u32)idx == PERF_REG_ARM64_LR) return regs->compat_lr; + if (idx == 15) + return regs->pc; } if ((u32)idx == PERF_REG_ARM64_SP) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 56e0190d6e65..42e4cd20fbbe 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -322,6 +322,25 @@ static inline void __poke_user_per(struct task_struct *child, child->thread.per_user.end = data; } +static void fixup_int_code(struct task_struct *child, addr_t data) +{ + struct pt_regs *regs = task_pt_regs(child); + int ilc = regs->int_code >> 16; + u16 insn; + + if (ilc > 6) + return; + + if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), + &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn)) + return; + + /* double check that tracee stopped on svc instruction */ + if ((insn >> 8) != 0xa) + return; + + regs->int_code = 0x20000 | (data & 0xffff); +} /* * Write a word to the user area of a process at location addr. This * operation does have an additional problem compared to peek_user. 
@@ -333,7 +352,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) struct user *dummy = NULL; addr_t offset; + if (addr < (addr_t) &dummy->regs.acrs) { + struct pt_regs *regs = task_pt_regs(child); /* * psw and gprs are stored on the stack */ @@ -351,7 +372,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) /* Invalid addressing mode bits */ return -EINVAL; } - *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct user, regs.gprs[2])) + fixup_int_code(child, data); + *(addr_t *)((addr_t) &regs->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* @@ -717,6 +742,10 @@ static int __poke_user_compat(struct task_struct *child, regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | (__u64)(tmp & PSW32_ADDR_AMODE); } else { + + if (test_pt_regs_flag(regs, PIF_SYSCALL) && + addr == offsetof(struct compat_user, regs.gprs[2])) + fixup_int_code(child, data); /* gpr 0-15 */ *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; } diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c index 60f7205ebe40..646dd58169ec 100644 --- a/arch/sparc/kernel/ptrace_32.c +++ b/arch/sparc/kernel/ptrace_32.c @@ -168,12 +168,17 @@ static int genregs32_set(struct task_struct *target, if (ret || !count) return ret; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &regs->y, + &regs->npc, 34 * sizeof(u32), 35 * sizeof(u32)); if (ret || !count) return ret; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &regs->y, + 35 * sizeof(u32), 36 * sizeof(u32)); + if (ret || !count) + return ret; return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - 35 * sizeof(u32), 38 * sizeof(u32)); + 36 * sizeof(u32), 38 * sizeof(u32)); } static int fpregs32_get(struct task_struct *target, diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9529fe69e1d9..ecb6009a2c8a 100644 --- 
a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1060,7 +1060,7 @@ struct kvm_x86_ops { void (*enable_log_dirty_pt_masked)(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t offset, unsigned long mask); - int (*write_log_dirty)(struct kvm_vcpu *vcpu); + int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa); /* pmu operations of sub-arch */ const struct kvm_pmu_ops *pmu_ops; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 082d0cea72f4..9df3d5d7214a 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1713,10 +1713,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, * Emulate arch specific page modification logging for the * nested hypervisor */ -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu) +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa) { if (kvm_x86_ops->write_log_dirty) - return kvm_x86_ops->write_log_dirty(vcpu); + return kvm_x86_ops->write_log_dirty(vcpu, l2_gpa); return 0; } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 068feab64acf..816a626b6250 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -194,7 +194,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn); -int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu); +int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa); int kvm_mmu_post_init_vm(struct kvm *kvm); void kvm_mmu_pre_destroy_vm(struct kvm *kvm); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 8cf7a09bdd73..7260a165488d 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -202,7 +202,7 @@ static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte) static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, struct guest_walker *walker, - int 
write_fault) + gpa_t addr, int write_fault) { unsigned level, index; pt_element_t pte, orig_pte; @@ -227,7 +227,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, !(pte & PT_GUEST_DIRTY_MASK)) { trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); #if PTTYPE == PTTYPE_EPT - if (kvm_arch_write_log_dirty(vcpu)) + if (kvm_arch_write_log_dirty(vcpu, addr)) return -EINVAL; #endif pte |= PT_GUEST_DIRTY_MASK; @@ -424,7 +424,8 @@ retry_walk: (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); if (unlikely(!accessed_dirty)) { - ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); + ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, + addr, write_fault); if (unlikely(ret < 0)) goto error; else if (ret) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 42c6ca05a613..11e683ec6c85 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -12462,11 +12462,10 @@ static void vmx_flush_log_dirty(struct kvm *kvm) kvm_flush_pml_buffers(kvm); } -static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) +static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa) { struct vmcs12 *vmcs12; struct vcpu_vmx *vmx = to_vmx(vcpu); - gpa_t gpa; struct page *page = NULL; u64 *pml_address; @@ -12487,7 +12486,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu) return 1; } - gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull; + gpa &= ~0xFFFull; page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address); if (is_error_page(page)) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3401061a2231..0db7f5cd9c72 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2344,7 +2344,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return kvm_mtrr_set_msr(vcpu, msr, data); case MSR_IA32_APICBASE: return kvm_set_apic_base(vcpu, msr_info); - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); @@ -2629,7 +2629,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_APICBASE: msr_info->data = kvm_get_apic_base(vcpu); break; - case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: + case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff: return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); break; case MSR_IA32_TSCDEADLINE: diff --git a/block/bio-integrity.c b/block/bio-integrity.c index c17f62d6714d..626e39b57998 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -315,7 +315,6 @@ bool bio_integrity_prep(struct bio *bio) if (ret == 0) { printk(KERN_ERR "could not attach integrity payload\n"); - kfree(buf); status = BLK_STS_RESOURCE; goto err_end_io; } diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 19ed342f41ca..1828b335c28a 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -944,13 +944,13 @@ static void __exit interrupt_stats_exit(void) } static ssize_t -acpi_show_profile(struct device *dev, struct device_attribute *attr, +acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); } -static const struct device_attribute pm_profile_attr = +static const struct kobj_attribute pm_profile_attr = __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); static ssize_t hotplug_enabled_show(struct kobject *kobj, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 2f81d6534270..bc2c27f0493f 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3996,12 +3996,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; const u8 *cdb = scmd->cmnd; - const u8 *p; u8 pg, spg; unsigned six_byte, pg_len, hdr_len, bd_len; int len; u16 fp = (u16)-1; u8 bp = 0xff; + u8 buffer[64]; + const 
u8 *p = buffer; VPRINTK("ENTER\n"); @@ -4035,12 +4036,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) goto invalid_param_len; - p = page_address(sg_page(scsi_sglist(scmd))); - /* Move past header and block descriptors. */ if (len < hdr_len) goto invalid_param_len; + if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd), + buffer, sizeof(buffer))) + goto invalid_param_len; + if (six_byte) bd_len = p[3]; else diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 39b119af65f7..2bbd6bed1535 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1112,7 +1112,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); } /* I/O need to be drained during transfer transition */ @@ -1384,11 +1384,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) return 0; sync_blockdev(lo->lo_device); - kill_bdev(lo->lo_device); + invalidate_bdev(lo->lo_device); blk_mq_freeze_queue(lo->lo_queue); - /* kill_bdev should have truncated all the pages */ + /* invalidate_bdev should have truncated all the pages */ if (lo->lo_device->bd_inode->i_mapping->nrpages) { err = -EAGAIN; pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c index 25a1706b6508..369aa81ab8c1 100644 --- a/drivers/char/diag/diag_dci.c +++ b/drivers/char/diag/diag_dci.c @@ -3170,6 +3170,7 @@ fail_alloc: kfree(new_entry); new_entry = NULL; } + put_task_struct(current); mutex_unlock(&driver->dci_mutex); put_task_struct(task_s); put_task_struct(task_s); diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c index 92c4fc187001..43cf043eb7d5 100644 --- a/drivers/char/diag/diag_usb.c +++ 
b/drivers/char/diag/diag_usb.c @@ -253,7 +253,8 @@ static void usb_disconnect_work_fn(struct work_struct *work) ch->name, atomic_read(&ch->disconnected), atomic_read(&ch->connected)); if (!atomic_read(&ch->connected) && - driver->usb_connected && diag_mask_param()) + driver->usb_connected && diag_mask_param() && + ch->id == DIAG_USB_LOCAL) diag_clear_masks(0); usb_disconnect(ch); diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index f3c28777b8c6..deb1d8f3bdc8 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -180,7 +180,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num) rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL, "entry%d", entry_num); if (rc) { - kfree(entry); + kobject_put(&entry->kobj); return rc; } } diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 9416e72f86aa..d491b3aa124f 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2126,7 +2126,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev) if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) ret = -EINVAL; - if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) + if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) ret = -EINVAL; if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d901591db9c8..6e8af2b91492 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1482,6 +1482,8 @@ static struct rdma_id_private *cma_find_listener( { struct rdma_id_private *id_priv, *id_priv_dev; + lockdep_assert_held(&lock); + if (!bind_list) return ERR_PTR(-EINVAL); @@ -1530,6 +1532,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, } } + mutex_lock(&lock); 
/* * Net namespace might be getting deleted while route lookup, * cm_id lookup is in progress. Therefore, perform netdevice @@ -1571,6 +1574,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); err: rcu_read_unlock(); + mutex_unlock(&lock); if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; @@ -2287,6 +2291,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct net *net = id_priv->id.route.addr.dev_addr.net; int ret; + lockdep_assert_held(&lock); + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return; @@ -2993,6 +2999,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list, u64 sid, mask; __be16 port; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); port = htons(bind_list->port); @@ -3021,6 +3029,8 @@ static int cma_alloc_port(enum rdma_port_space ps, struct rdma_bind_list *bind_list; int ret; + lockdep_assert_held(&lock); + bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); if (!bind_list) return -ENOMEM; @@ -3047,6 +3057,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, struct sockaddr *saddr = cma_src_addr(id_priv); __be16 dport = cma_port(daddr); + lockdep_assert_held(&lock); + hlist_for_each_entry(cur_id, &bind_list->owners, node) { struct sockaddr *cur_daddr = cma_dst_addr(cur_id); struct sockaddr *cur_saddr = cma_src_addr(cur_id); @@ -3086,6 +3098,8 @@ static int cma_alloc_any_port(enum rdma_port_space ps, unsigned int rover; struct net *net = id_priv->id.route.addr.dev_addr.net; + lockdep_assert_held(&lock); + inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rover = prandom_u32() % remaining + low; @@ -3133,6 +3147,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *cur_id; struct sockaddr *addr, *cur_addr; + lockdep_assert_held(&lock); + addr = cma_src_addr(id_priv); 
hlist_for_each_entry(cur_id, &bind_list->owners, node) { if (id_priv == cur_id) @@ -3163,6 +3179,8 @@ static int cma_use_port(enum rdma_port_space ps, unsigned short snum; int ret; + lockdep_assert_held(&lock); + snum = ntohs(cma_port(cma_src_addr(id_priv))); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 6072ac7023cb..08d2e9cc28eb 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2907,6 +2907,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { + kfree(mad_priv); ret = -ENOMEM; break; } diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 5557f1bd2356..03abfe8b9a58 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -2213,6 +2213,14 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, cfg->cbndx = ret; + if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_GEOMETRY))) { + /* Geometry is not set use the default geometry */ + domain->geometry.aperture_start = 0; + domain->geometry.aperture_end = (1UL << ias) - 1; + if (domain->geometry.aperture_end >= SZ_1G * 4ULL) + domain->geometry.aperture_end = (SZ_1G * 4ULL) - 1; + } + if (arm_smmu_is_slave_side_secure(smmu_domain)) { smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) { .quirks = quirks, @@ -2223,6 +2231,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, }, .tlb = tlb_ops, .iommu_dev = smmu->dev, + .iova_base = domain->geometry.aperture_start, + .iova_end = domain->geometry.aperture_end, }; fmt = ARM_MSM_SECURE; } else { @@ -2233,6 +2243,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, .oas = oas, .tlb = tlb_ops, .iommu_dev = smmu->dev, + .iova_base = domain->geometry.aperture_start, + .iova_end = domain->geometry.aperture_end, }; } @@ -3521,7 +3533,7 @@ static 
int arm_smmu_domain_get_attr(struct iommu_domain *domain, ret = -ENODEV; break; } - info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds; + info->ops = smmu_domain->pgtbl_ops; ret = 0; break; } @@ -3782,7 +3794,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, ret = 0; break; } - case DOMAIN_ATTR_CB_STALL_DISABLE: if (*((int *)data)) smmu_domain->attributes |= @@ -3795,6 +3806,44 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, 1 << DOMAIN_ATTR_NO_CFRE; ret = 0; break; + case DOMAIN_ATTR_GEOMETRY: { + struct iommu_domain_geometry *geometry = + (struct iommu_domain_geometry *)data; + + if (smmu_domain->smmu != NULL) { + dev_err(smmu_domain->smmu->dev, + "cannot set geometry attribute while attached\n"); + ret = -EBUSY; + break; + } + + if (geometry->aperture_start >= SZ_1G * 4ULL || + geometry->aperture_end >= SZ_1G * 4ULL) { + pr_err("fastmap does not support IOVAs >= 4GB\n"); + ret = -EINVAL; + break; + } + if (smmu_domain->attributes + & (1 << DOMAIN_ATTR_GEOMETRY)) { + if (geometry->aperture_start + < domain->geometry.aperture_start) + domain->geometry.aperture_start = + geometry->aperture_start; + + if (geometry->aperture_end + > domain->geometry.aperture_end) + domain->geometry.aperture_end = + geometry->aperture_end; + } else { + smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY; + domain->geometry.aperture_start = + geometry->aperture_start; + domain->geometry.aperture_end = geometry->aperture_end; + } + ret = 0; + break; + } + default: ret = -ENODEV; } diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c index 08deff94e783..ec4ba8670b9a 100644 --- a/drivers/iommu/dma-mapping-fast.c +++ b/drivers/iommu/dma-mapping-fast.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,6 +21,7 @@ #include #include #include +#include "io-pgtable.h" #include #include @@ -29,14 +30,6 @@ #define FAST_PAGE_SHIFT 12 #define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT) #define FAST_PAGE_MASK (~(PAGE_SIZE - 1)) -#define FAST_PTE_ADDR_MASK ((av8l_fast_iopte)0xfffffffff000) -#define FAST_MAIR_ATTR_IDX_CACHE 1 -#define FAST_PTE_ATTRINDX_SHIFT 2 -#define FAST_PTE_ATTRINDX_MASK 0x7 -#define FAST_PTE_SH_SHIFT 8 -#define FAST_PTE_SH_MASK (((av8l_fast_iopte)0x3) << FAST_PTE_SH_SHIFT) -#define FAST_PTE_SH_OS (((av8l_fast_iopte)2) << FAST_PTE_SH_SHIFT) -#define FAST_PTE_SH_IS (((av8l_fast_iopte)3) << FAST_PTE_SH_SHIFT) static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, bool coherent) @@ -61,27 +54,6 @@ static int __get_iommu_pgprot(unsigned long attrs, int prot, return prot; } -static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping, - void *start, void *end) -{ - if (!mapping->is_smmu_pt_coherent) - dmac_clean_range(start, end); -} - -static bool __fast_is_pte_coherent(av8l_fast_iopte *ptep) -{ - int attr_idx = (*ptep & (FAST_PTE_ATTRINDX_MASK << - FAST_PTE_ATTRINDX_SHIFT)) >> - FAST_PTE_ATTRINDX_SHIFT; - - if ((attr_idx == FAST_MAIR_ATTR_IDX_CACHE) && - (((*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_IS) || - (*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_OS)) - return true; - - return false; -} - static bool is_dma_coherent(struct device *dev, unsigned long attrs) { bool is_coherent; @@ -201,7 +173,11 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping, iommu_tlbiall(mapping->domain); mapping->have_stale_tlbs = false; - av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync); + av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, + mapping->domain->geometry.aperture_start, + mapping->base, + mapping->base + mapping->size - 1, + skip_sync); } iova = (bit << 
FAST_PAGE_SHIFT) + mapping->base; @@ -374,12 +350,10 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page, struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; dma_addr_t iova; unsigned long flags; - av8l_fast_iopte *pmd; phys_addr_t phys_plus_off = page_to_phys(page) + offset; phys_addr_t phys_to_map = round_down(phys_plus_off, FAST_PAGE_SIZE); unsigned long offset_from_phys_to_map = phys_plus_off & ~FAST_PAGE_MASK; size_t len = ALIGN(size + offset_from_phys_to_map, FAST_PAGE_SIZE); - int nptes = len >> FAST_PAGE_SHIFT; bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC); int prot = __fast_dma_direction_to_prot(dir); bool is_coherent = is_dma_coherent(dev, attrs); @@ -397,13 +371,10 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page, if (unlikely(iova == DMA_ERROR_CODE)) goto fail; - pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); - - if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot))) + if (unlikely(av8l_fast_map_public(mapping->pgtbl_ops, iova, + phys_to_map, len, prot))) goto fail_free_iova; - fast_dmac_clean_range(mapping, pmd, pmd + nptes); - spin_unlock_irqrestore(&mapping->lock, flags); trace_map(mapping->domain, iova, phys_to_map, len, prot); @@ -422,20 +393,23 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova, { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; unsigned long flags; - av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; size_t len = ALIGN(size + offset, FAST_PAGE_SIZE); - int nptes = len >> FAST_PAGE_SHIFT; - struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC); bool is_coherent = is_dma_coherent(dev, attrs); - if (!skip_sync && !is_coherent) - __fast_dma_page_dev_to_cpu(page, offset, size, dir); + if (!skip_sync && !is_coherent) { + phys_addr_t phys; + + phys = 
av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova); + WARN_ON(!phys); + + __fast_dma_page_dev_to_cpu(phys_to_page(phys), offset, + size, dir); + } spin_lock_irqsave(&mapping->lock, flags); - av8l_fast_unmap_public(pmd, len); - fast_dmac_clean_range(mapping, pmd, pmd + nptes); + av8l_fast_unmap_public(mapping->pgtbl_ops, iova, len); __fast_smmu_free_iova(mapping, iova - offset, len); spin_unlock_irqrestore(&mapping->lock, flags); @@ -446,24 +420,34 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir) { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; - av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; - struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); - if (!__fast_is_pte_coherent(pmd)) - __fast_dma_page_dev_to_cpu(page, offset, size, dir); + if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) { + phys_addr_t phys; + + phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova); + WARN_ON(!phys); + + __fast_dma_page_dev_to_cpu(phys_to_page(phys), offset, + size, dir); + } } static void fast_smmu_sync_single_for_device(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir) { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; - av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; - struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); - if (!__fast_is_pte_coherent(pmd)) - __fast_dma_page_cpu_to_dev(page, offset, size, dir); + if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) { + phys_addr_t phys; + + phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova); + WARN_ON(!phys); + + __fast_dma_page_cpu_to_dev(phys_to_page(phys), offset, + size, dir); + } } static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg, @@ -538,7 +522,6 @@ static 
void *fast_smmu_alloc(struct device *dev, size_t size, struct sg_table sgt; dma_addr_t dma_addr, iova_iter; void *addr; - av8l_fast_iopte *ptep; unsigned long flags; struct sg_mapping_iter miter; size_t count = ALIGN(size, SZ_4K) >> PAGE_SHIFT; @@ -596,17 +579,14 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC); while (sg_miter_next(&miter)) { - int nptes = miter.length >> FAST_PAGE_SHIFT; - - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova_iter); if (unlikely(av8l_fast_map_public( - ptep, page_to_phys(miter.page), + mapping->pgtbl_ops, iova_iter, + page_to_phys(miter.page), miter.length, prot))) { dev_err(dev, "no map public\n"); /* TODO: unwind previously successful mappings */ goto out_free_iova; } - fast_dmac_clean_range(mapping, ptep, ptep + nptes); iova_iter += miter.length; } sg_miter_stop(&miter); @@ -626,9 +606,7 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, out_unmap: /* need to take the lock again for page tables and iova */ spin_lock_irqsave(&mapping->lock, flags); - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr); - av8l_fast_unmap_public(ptep, size); - fast_dmac_clean_range(mapping, ptep, ptep + count); + av8l_fast_unmap_public(mapping->pgtbl_ops, dma_addr, size); out_free_iova: __fast_smmu_free_iova(mapping, dma_addr, size); spin_unlock_irqrestore(&mapping->lock, flags); @@ -647,7 +625,6 @@ static void fast_smmu_free(struct device *dev, size_t size, struct vm_struct *area; struct page **pages; size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT; - av8l_fast_iopte *ptep; unsigned long flags; size = ALIGN(size, SZ_4K); @@ -658,10 +635,8 @@ static void fast_smmu_free(struct device *dev, size_t size, pages = area->pages; dma_common_free_remap(vaddr, size, VM_USERMAP, false); - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle); spin_lock_irqsave(&mapping->lock, flags); - av8l_fast_unmap_public(ptep, size); - 
fast_dmac_clean_range(mapping, ptep, ptep + count); + av8l_fast_unmap_public(mapping->pgtbl_ops, dma_handle, size); __fast_smmu_free_iova(mapping, dma_handle, size); spin_unlock_irqrestore(&mapping->lock, flags); __fast_smmu_free_pages(pages, count); @@ -767,16 +742,20 @@ static int fast_smmu_mapping_error(struct device *dev, static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast, void *data) { - av8l_fast_iopte *ptep = data; + av8l_fast_iopte *pmds, *ptep = data; dma_addr_t iova; unsigned long bitmap_idx; + struct io_pgtable *tbl; - bitmap_idx = (unsigned long)(ptep - fast->pgtbl_pmds); + tbl = container_of(fast->pgtbl_ops, struct io_pgtable, ops); + pmds = tbl->cfg.av8l_fast_cfg.pmds; + + bitmap_idx = (unsigned long)(ptep - pmds); iova = bitmap_idx << FAST_PAGE_SHIFT; dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova); dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx); dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep, - fast->pgtbl_pmds, bitmap_idx); + pmds, bitmap_idx); print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS, 32, 8, fast->bitmap, fast->bitmap_size, false); } @@ -822,7 +801,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = { * * Creates a mapping structure which holds information about used/unused IO * address ranges, which is required to perform mapping with IOMMU aware - * functions. The only VA range supported is [0, 4GB). + * functions. The only VA range supported is [0, 4GB]. * * The client device need to be attached to the mapping with * fast_smmu_attach_device function. 
@@ -957,19 +936,16 @@ int fast_smmu_init_mapping(struct device *dev, fast_smmu_reserve_pci_windows(dev, mapping->fast); + domain->geometry.aperture_start = mapping->base; + domain->geometry.aperture_end = mapping->base + size - 1; + if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO, &info)) { dev_err(dev, "Couldn't get page table info\n"); err = -EINVAL; goto release_mapping; } - mapping->fast->pgtbl_pmds = info.pmds; - - if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT, - &mapping->fast->is_smmu_pt_coherent)) { - err = -EINVAL; - goto release_mapping; - } + mapping->fast->pgtbl_ops = (struct io_pgtable_ops *)info.ops; mapping->fast->notifier.notifier_call = fast_smmu_notify; av8l_register_notify(&mapping->fast->notifier); diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c index bf34c646fe57..5a5d597a00a6 100644 --- a/drivers/iommu/io-pgtable-fast.c +++ b/drivers/iommu/io-pgtable-fast.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -42,6 +43,10 @@ struct av8l_fast_io_pgtable { av8l_fast_iopte *puds[4]; av8l_fast_iopte *pmds; struct page **pages; /* page table memory */ + int nr_pages; + dma_addr_t base; + dma_addr_t start; + dma_addr_t end; }; /* Page table bits */ @@ -58,6 +63,7 @@ struct av8l_fast_io_pgtable { #define AV8L_FAST_PTE_SH_NS (((av8l_fast_iopte)0) << 8) #define AV8L_FAST_PTE_SH_OS (((av8l_fast_iopte)2) << 8) #define AV8L_FAST_PTE_SH_IS (((av8l_fast_iopte)3) << 8) +#define AV8L_FAST_PTE_SH_MASK (((av8l_fast_iopte)3) << 8) #define AV8L_FAST_PTE_NS (((av8l_fast_iopte)1) << 5) #define AV8L_FAST_PTE_VALID (((av8l_fast_iopte)1) << 0) @@ -75,6 +81,7 @@ struct av8l_fast_io_pgtable { #define AV8L_FAST_PTE_AP_PRIV_RO (((av8l_fast_iopte)2) << 6) #define AV8L_FAST_PTE_AP_RO (((av8l_fast_iopte)3) << 6) #define AV8L_FAST_PTE_ATTRINDX_SHIFT 2 +#define AV8L_FAST_PTE_ATTRINDX_MASK 0x7 #define AV8L_FAST_PTE_nG (((av8l_fast_iopte)1) << 11) /* Stage-2 PTE */ @@ -142,6 +149,13 @@ struct av8l_fast_io_pgtable { #define AV8L_FAST_PAGE_SHIFT 12 +#define PTE_MAIR_IDX(pte) \ + ((pte >> AV8L_FAST_PTE_ATTRINDX_SHIFT) & \ + AV8L_FAST_PTE_ATTRINDX_MASK) + +#define PTE_SH_IDX(pte) (pte & AV8L_FAST_PTE_SH_MASK) + +#define iopte_pmd_offset(pmds, base, iova) (pmds + ((iova - base) >> 12)) #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB @@ -170,12 +184,15 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep) } } -void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, bool skip_sync) +void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, + u64 start, u64 end, bool skip_sync) { int i; - av8l_fast_iopte *pmdp = pmds; + struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start); - for (i = 0; i < ((SZ_1G * 4UL) 
>> AV8L_FAST_PAGE_SHIFT); ++i) { + for (i = start >> AV8L_FAST_PAGE_SHIFT; + i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) { if (!(*pmdp & AV8L_FAST_PTE_VALID)) { *pmdp = 0; if (!skip_sync) @@ -190,11 +207,18 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep) } #endif -/* caller must take care of cache maintenance on *ptep */ -int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size, - int prot) +static void av8l_clean_range(struct io_pgtable_ops *ops, + av8l_fast_iopte *start, av8l_fast_iopte *end) +{ + struct io_pgtable *iop = iof_pgtable_ops_to_pgtable(ops); + + if (!(iop->cfg.quirks & IO_PGTABLE_QUIRK_NO_DMA)) + dmac_clean_range(start, end); +} + +static av8l_fast_iopte +av8l_fast_prot_to_pte(struct av8l_fast_io_pgtable *data, int prot) { - int i, nptes = size >> AV8L_FAST_PAGE_SHIFT; av8l_fast_iopte pte = AV8L_FAST_PTE_XN | AV8L_FAST_PTE_TYPE_PAGE | AV8L_FAST_PTE_AF @@ -216,58 +240,67 @@ int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size, else pte |= AV8L_FAST_PTE_AP_RW; - paddr &= AV8L_FAST_PTE_ADDR_MASK; - for (i = 0; i < nptes; i++, paddr += SZ_4K) { - __av8l_check_for_stale_tlb(ptep + i); - *(ptep + i) = pte | paddr; - } - - return 0; + return pte; } static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); - av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova); - unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; + av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova); + unsigned long i, nptes = size >> AV8L_FAST_PAGE_SHIFT; + av8l_fast_iopte pte; - av8l_fast_map_public(ptep, paddr, size, prot); - dmac_clean_range(ptep, ptep + nptes); + pte = av8l_fast_prot_to_pte(data, prot); + paddr &= AV8L_FAST_PTE_ADDR_MASK; + for (i = 0; i < nptes; i++, paddr += SZ_4K) { + __av8l_check_for_stale_tlb(ptep + i); + *(ptep + i) = pte | paddr; + } + 
av8l_clean_range(ops, ptep, ptep + nptes); return 0; } -static void __av8l_fast_unmap(av8l_fast_iopte *ptep, size_t size, - bool need_stale_tlb_tracking) +int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) { - unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; - int val = need_stale_tlb_tracking + return av8l_fast_map(ops, iova, paddr, size, prot); +} + +static size_t +__av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova, + size_t size, bool allow_stale_tlb) +{ + struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + unsigned long nptes; + av8l_fast_iopte *ptep; + int val = allow_stale_tlb ? AV8L_FAST_PTE_UNMAPPED_NEED_TLBI : 0; + ptep = iopte_pmd_offset(data->pmds, data->base, iova); + nptes = size >> AV8L_FAST_PAGE_SHIFT; + memset(ptep, val, sizeof(*ptep) * nptes); + av8l_clean_range(ops, ptep, ptep + nptes); + if (!allow_stale_tlb) + io_pgtable_tlb_flush_all(&data->iop); + + return size; } -/* caller must take care of cache maintenance on *ptep */ -void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size) +/* caller must take care of tlb cache maintenance */ +void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova, + size_t size) { - __av8l_fast_unmap(ptep, size, true); + __av8l_fast_unmap(ops, iova, size, true); } static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova, size_t size) { - struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); - struct io_pgtable *iop = &data->iop; - av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova); - unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; - - __av8l_fast_unmap(ptep, size, false); - dmac_clean_range(ptep, ptep + nptes); - io_pgtable_tlb_flush_all(iop); - - return size; + return __av8l_fast_unmap(ops, iova, size, false); } #if defined(CONFIG_ARM64) @@ -312,6 +345,12 @@ static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops, return phys | (iova & 
0xfff); } +phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, + unsigned long iova) +{ + return av8l_fast_iova_to_phys(ops, iova); +} + static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, size_t *size) @@ -319,6 +358,23 @@ static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova, return -ENODEV; } +static bool av8l_fast_iova_coherent(struct io_pgtable_ops *ops, + unsigned long iova) +{ + struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova); + + return ((PTE_MAIR_IDX(*ptep) == AV8L_FAST_MAIR_ATTR_IDX_CACHE) && + ((PTE_SH_IDX(*ptep) == AV8L_FAST_PTE_SH_OS) || + (PTE_SH_IDX(*ptep) == AV8L_FAST_PTE_SH_IS))); +} + +bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops, + unsigned long iova) +{ + return av8l_fast_iova_coherent(ops, iova); +} + static struct av8l_fast_io_pgtable * av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg) { @@ -333,13 +389,14 @@ av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg) .map_sg = av8l_fast_map_sg, .unmap = av8l_fast_unmap, .iova_to_phys = av8l_fast_iova_to_phys, + .is_iova_coherent = av8l_fast_iova_coherent, }; return data; } /* - * We need 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and + * We need max 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and * 2048 pages for pmds (each pud page contains 512 table entries, each * pointing to a pmd). 
*/ @@ -348,12 +405,38 @@ av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg) #define NUM_PMD_PAGES 2048 #define NUM_PGTBL_PAGES (NUM_PGD_PAGES + NUM_PUD_PAGES + NUM_PMD_PAGES) +/* undefine arch specific definitions which depends on page table format */ +#undef pud_index +#undef pud_mask +#undef pud_next +#undef pmd_index +#undef pmd_mask +#undef pmd_next + +#define pud_index(addr) (((addr) >> 30) & 0x3) +#define pud_mask(addr) ((addr) & ~((1UL << 30) - 1)) +#define pud_next(addr, end) \ +({ unsigned long __boundary = pud_mask(addr + (1UL << 30));\ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ +}) + +#define pmd_index(addr) (((addr) >> 21) & 0x1ff) +#define pmd_mask(addr) ((addr) & ~((1UL << 21) - 1)) +#define pmd_next(addr, end) \ +({ unsigned long __boundary = pmd_mask(addr + (1UL << 21));\ + (__boundary - 1 < (end) - 1) ? __boundary : (end); \ +}) + static int av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, struct io_pgtable_cfg *cfg, void *cookie) { int i, j, pg = 0; struct page **pages, *page; + dma_addr_t base = cfg->iova_base; + dma_addr_t end = cfg->iova_end; + dma_addr_t pud, pmd; + int pmd_pg_index; pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, __GFP_NOWARN | __GFP_NORETRY); @@ -371,10 +454,11 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, data->pgd = page_address(page); /* - * We need 2048 entries at level 2 to map 4GB of VA space. A page - * can hold 512 entries, so we need 4 pages. + * We need max 2048 entries at level 2 to map 4GB of VA space. A page + * can hold 512 entries, so we need max 4 pages. 
*/ - for (i = 0; i < 4; ++i) { + for (i = pud_index(base), pud = base; pud < end; + ++i, pud = pud_next(pud, end)) { av8l_fast_iopte pte, *ptep; page = alloc_page(GFP_KERNEL | __GFP_ZERO); @@ -389,12 +473,15 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, dmac_clean_range(data->pgd, data->pgd + 4); /* - * We have 4 puds, each of which can point to 512 pmds, so we'll - * have 2048 pmds, each of which can hold 512 ptes, for a grand + * We have max 4 puds, each of which can point to 512 pmds, so we'll + * have max 2048 pmds, each of which can hold 512 ptes, for a grand * total of 2048*512=1048576 PTEs. */ - for (i = 0; i < 4; ++i) { - for (j = 0; j < 512; ++j) { + pmd_pg_index = pg; + for (i = pud_index(base), pud = base; pud < end; + ++i, pud = pud_next(pud, end)) { + for (j = pmd_index(pud), pmd = pud; pmd < pud_next(pud, end); + ++j, pmd = pmd_next(pmd, end)) { av8l_fast_iopte pte, *pudp; void *addr; @@ -413,21 +500,21 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data, dmac_clean_range(data->puds[i], data->puds[i] + 512); } - if (WARN_ON(pg != NUM_PGTBL_PAGES)) - goto err_free_pages; - /* * We map the pmds into a virtually contiguous space so that we * don't have to traverse the first two levels of the page tables * to find the appropriate pud. Instead, it will be a simple * offset from the virtual base of the pmds. 
*/ - data->pmds = vmap(&pages[NUM_PGD_PAGES + NUM_PUD_PAGES], NUM_PMD_PAGES, + data->pmds = vmap(&pages[pmd_pg_index], pg - pmd_pg_index, VM_IOREMAP, PAGE_KERNEL); if (!data->pmds) goto err_free_pages; data->pages = pages; + data->nr_pages = pg; + data->base = base; + data->end = end; return 0; err_free_pages: @@ -533,7 +620,7 @@ static void av8l_fast_free_pgtable(struct io_pgtable *iop) struct av8l_fast_io_pgtable *data = iof_pgtable_to_data(iop); vunmap(data->pmds); - for (i = 0; i < NUM_PGTBL_PAGES; ++i) + for (i = 0; i < data->nr_pages; ++i) __free_page(data->pages[i]); kvfree(data->pages); kfree(data); @@ -605,6 +692,7 @@ static int __init av8l_fast_positive_testing(void) struct av8l_fast_io_pgtable *data; av8l_fast_iopte *pmds; u64 max = SZ_1G * 4ULL - 1; + u64 base = 0; cfg = (struct io_pgtable_cfg) { .quirks = 0, @@ -612,6 +700,8 @@ static int __init av8l_fast_positive_testing(void) .ias = 32, .oas = 32, .pgsize_bitmap = SZ_4K, + .iova_base = base, + .iova_end = max, }; cfg_cookie = &cfg; @@ -624,81 +714,81 @@ static int __init av8l_fast_positive_testing(void) pmds = data->pmds; /* map the entire 4GB VA space with 4K map calls */ - for (iova = 0; iova < max; iova += SZ_4K) { + for (iova = base; iova < max; iova += SZ_4K) { if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, - max))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) failed++; /* unmap it all */ - for (iova = 0; iova < max; iova += SZ_4K) { + for (iova = base; iova < max; iova += SZ_4K) { if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K)) failed++; } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(pmds, false); + av8l_fast_clear_stale_ptes(ops, base, base, max, false); /* map the entire 4GB VA space with 8K map calls */ - for (iova = 0; iova < max; iova += SZ_8K) { + for (iova = base; iova < max; iova += SZ_8K) { if (WARN_ON(ops->map(ops, iova, iova, 
SZ_8K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, - max))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) failed++; /* unmap it all with 8K unmap calls */ - for (iova = 0; iova < max; iova += SZ_8K) { + for (iova = base; iova < max; iova += SZ_8K) { if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K)) failed++; } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(pmds, false); + av8l_fast_clear_stale_ptes(ops, base, base, max, false); /* map the entire 4GB VA space with 16K map calls */ - for (iova = 0; iova < max; iova += SZ_16K) { + for (iova = base; iova < max; iova += SZ_16K) { if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, - max))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) failed++; /* unmap it all */ - for (iova = 0; iova < max; iova += SZ_16K) { + for (iova = base; iova < max; iova += SZ_16K) { if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K)) failed++; } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(pmds, false); + av8l_fast_clear_stale_ptes(ops, base, base, max, false); /* map the entire 4GB VA space with 64K map calls */ - for (iova = 0; iova < max; iova += SZ_64K) { + for (iova = base; iova < max; iova += SZ_64K) { if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) { failed++; continue; } } - if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0, - max))) + if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base, + base, max - base))) failed++; /* unmap it all at once */ - if (WARN_ON(ops->unmap(ops, 0, max) != max)) + if (WARN_ON(ops->unmap(ops, base, max - base) != (max - base))) failed++; free_io_pgtable_ops(ops); diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 8e03a2c37780..294b9aea0b8b 100644 --- a/drivers/iommu/io-pgtable.h +++ 
b/drivers/iommu/io-pgtable.h @@ -114,6 +114,8 @@ struct io_pgtable_cfg { unsigned int oas; const struct iommu_gather_ops *tlb; struct device *iommu_dev; + dma_addr_t iova_base; + dma_addr_t iova_end; /* Low-level data specific to the table format */ union { diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c index fa0fb46489f9..d6157ad37dd4 100644 --- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c +++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c @@ -340,6 +340,23 @@ put: } } +static void __cam_isp_ctx_dequeue_request(struct cam_context *ctx, + struct cam_ctx_request *req) +{ + struct cam_ctx_request *req_current; + struct cam_ctx_request *req_prev; + + spin_lock_bh(&ctx->lock); + list_for_each_entry_safe_reverse(req_current, req_prev, + &ctx->pending_req_list, list) { + if (req->request_id == req_current->request_id) { + list_del_init(&req_current->list); + break; + } + } + spin_unlock_bh(&ctx->lock); +} + static int __cam_isp_ctx_enqueue_request_in_order( struct cam_context *ctx, struct cam_ctx_request *req) { @@ -3433,13 +3450,12 @@ static int __cam_isp_ctx_config_dev_in_top_state( add_req.dev_hdl = ctx->dev_hdl; add_req.req_id = req->request_id; add_req.skip_before_applying = 0; + __cam_isp_ctx_enqueue_request_in_order(ctx, req); rc = ctx->ctx_crm_intf->add_req(&add_req); if (rc) { CAM_ERR(CAM_ISP, "Add req failed: req id=%llu", req->request_id); - } else { - __cam_isp_ctx_enqueue_request_in_order( - ctx, req); + __cam_isp_ctx_dequeue_request(ctx, req); } } else { rc = -EINVAL; diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c index 0d24bad74d36..f1ba6b0e79e3 100644 --- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c +++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c @@ -2499,7 +2499,6 @@ static int 
cam_req_mgr_cb_notify_trigger( struct crm_task_payload *task_data; bool send_sof = true; int i = 0; - int64_t sof_time_diff = 0; if (!trigger_data) { CAM_ERR(CAM_CRM, "sof_data is NULL"); @@ -2519,10 +2518,6 @@ static int cam_req_mgr_cb_notify_trigger( if (link->dev_sof_evt[i].dev_hdl == trigger_data->dev_hdl) { if (link->dev_sof_evt[i].sof_done == false) { link->dev_sof_evt[i].sof_done = true; - link->dev_sof_evt[i].frame_id = - trigger_data->frame_id; - link->dev_sof_evt[i].timestamp = - trigger_data->sof_timestamp_val; } else CAM_INFO(CAM_CRM, "Received Spurious SOF"); } else if (link->dev_sof_evt[i].sof_done == false) { @@ -2532,23 +2527,6 @@ static int cam_req_mgr_cb_notify_trigger( if (!send_sof) return 0; - if (link->num_sof_src > 1) { - for (i = 0; i < (link->num_sof_src - 1); i++) { - if (link->dev_sof_evt[i].timestamp >= - link->dev_sof_evt[i+1].timestamp) { - sof_time_diff = link->dev_sof_evt[i].timestamp - - link->dev_sof_evt[i+1].timestamp; - } else { - sof_time_diff = - link->dev_sof_evt[i+1].timestamp - - link->dev_sof_evt[i].timestamp; - } - if ((link->dev_sof_evt[i].frame_id != - link->dev_sof_evt[i+1].frame_id) || - sof_time_diff > TIMESTAMP_DIFF_THRESHOLD) - return 0; - } - } for (i = 0; i < link->num_sof_src; i++) link->dev_sof_evt[i].sof_done = false; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 5e5022fa1d04..85029d43da75 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1250,8 +1250,12 @@ out_disable_adv_intr: static void __alx_stop(struct alx_priv *alx) { - alx_halt(alx); alx_free_irq(alx); + + cancel_work_sync(&alx->link_check_wk); + cancel_work_sync(&alx->reset_wk); + + alx_halt(alx); alx_free_rings(alx); alx_free_napis(alx); } @@ -1863,9 +1867,6 @@ static void alx_remove(struct pci_dev *pdev) struct alx_priv *alx = pci_get_drvdata(pdev); struct alx_hw *hw = &alx->hw; - cancel_work_sync(&alx->link_check_wk); - 
cancel_work_sync(&alx->reset_wk); - /* restore permanent mac address */ alx_set_macaddr(hw, hw->perm_addr); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 7d3cbbd88a00..8bfa2523e253 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1567,11 +1567,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) goto out; } - if (skb_padto(skb, ETH_ZLEN)) { - ret = NETDEV_TX_OK; - goto out; - } - /* Retain how many bytes will be sent on the wire, without TSB inserted * by transmit checksum offload */ @@ -1621,6 +1616,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) len_stat = (size << DMA_BUFLENGTH_SHIFT) | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + /* Note: if we ever change from DMA_TX_APPEND_CRC below we + * will need to restore software padding of "runt" packets + */ if (!i) { len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; if (skb->ip_summed == CHECKSUM_PARTIAL) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index bc0221eafe5c..e40d31b40525 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -18179,8 +18179,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - /* We probably don't have netdev yet */ - if (!netdev || !netif_running(netdev)) + /* Could be second call or maybe we don't have netdev yet */ + if (!netdev || tp->pcierr_recovery || !netif_running(netdev)) goto done; /* We needn't recover from permanent error */ diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 880d925438c1..b43aebfc7f5b 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1695,7 +1695,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) } netdev->min_mtu = IBMVETH_MIN_MTU; - 
netdev->max_mtu = ETH_MAX_MTU; + netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH; memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 27ba476f761d..4fc3468f6f38 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -396,7 +396,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } - iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->vf_cids = vf_cids; iids->tids += vf_tids * p_mngr->vf_count; DP_VERBOSE(p_hwfn, QED_MSG_ILT, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index a2a9921b467b..693f2a039383 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); } +#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90 +#define QED_VF_CHANNEL_USLEEP_DELAY 100 +#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 +#define QED_VF_CHANNEL_MSLEEP_DELAY 25 + static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) { union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; struct ustorm_trigger_vf_zone trigger; struct ustorm_vf_zone *zone_data; - int rc = 0, time = 100; + int iter, rc = 0; zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; @@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); /* When PF would be done with the response, it would write back to the - * `done' address. Poll until then. + * `done' address from a coherent DMA zone. Poll until then. 
*/ - while ((!*done) && time) { - msleep(25); - time--; + + iter = QED_VF_CHANNEL_USLEEP_ITERATIONS; + while (!*done && iter--) { + udelay(QED_VF_CHANNEL_USLEEP_DELAY); + dma_rmb(); + } + + iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS; + while (!*done && iter--) { + msleep(QED_VF_CHANNEL_MSLEEP_DELAY); + dma_rmb(); } if (!*done) { diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 056cb6093630..8ad05e500829 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -651,10 +651,10 @@ static int rocker_dma_rings_init(struct rocker *rocker) err_dma_event_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->event_ring); err_dma_event_ring_create: + rocker_dma_cmd_ring_waits_free(rocker); +err_dma_cmd_ring_waits_alloc: rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, PCI_DMA_BIDIRECTIONAL); -err_dma_cmd_ring_waits_alloc: - rocker_dma_cmd_ring_waits_free(rocker); err_dma_cmd_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); return err; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index c5884c5f0489..5a2cdd2ccce6 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1400,10 +1400,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } if (pkt_cnt == 0) { - /* Skip IP alignment psudo header */ - skb_pull(skb, 2); skb->len = pkt_len; - skb_set_tail_pointer(skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(skb, 2); + skb_set_tail_pointer(skb, skb->len); skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(skb, pkt_hdr); return 1; @@ -1412,8 +1412,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) ax_skb = skb_clone(skb, GFP_ATOMIC); if (ax_skb) { ax_skb->len = pkt_len; - ax_skb->data = skb->data + 2; - skb_set_tail_pointer(ax_skb, pkt_len); + /* Skip IP alignment pseudo header */ + skb_pull(ax_skb, 2); + 
skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); usbnet_skb_return(dev, ax_skb); diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 9654898f3e51..6748e82c6352 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -449,7 +449,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, /* * vendor strings must be an exact match */ - if (vmax != strlen(devinfo->vendor) || + if (vmax != strnlen(devinfo->vendor, + sizeof(devinfo->vendor)) || memcmp(devinfo->vendor, vskip, vmax)) continue; @@ -457,7 +458,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, * @model specifies the full string, and * must be larger or equal to devinfo->model */ - mlen = strlen(devinfo->model); + mlen = strnlen(devinfo->model, sizeof(devinfo->model)); if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) continue; return devinfo; diff --git a/drivers/staging/fw-api/fw/htt_stats.h b/drivers/staging/fw-api/fw/htt_stats.h index 9c262820c984..9a10f1f88119 100644 --- a/drivers/staging/fw-api/fw/htt_stats.h +++ b/drivers/staging/fw-api/fw/htt_stats.h @@ -771,6 +771,10 @@ typedef struct { A_UINT32 num_mu_peer_blacklisted; /* Num of times mu_ofdma seq posted */ A_UINT32 mu_ofdma_seq_posted; + /* Num of times UL MU MIMO seq posted */ + A_UINT32 ul_mumimo_seq_posted; + /* Num of times UL OFDMA seq posted */ + A_UINT32 ul_ofdma_seq_posted; } htt_tx_pdev_stats_cmn_tlv; #define HTT_TX_PDEV_STATS_URRN_TLV_SZ(_num_elems) (sizeof(A_UINT32) * (_num_elems)) @@ -2669,6 +2673,9 @@ typedef struct { A_UINT32 desc_threshold; A_UINT32 hwsch_tqm_invalid_status; A_UINT32 missed_tqm_gen_mpdus; + A_UINT32 tqm_active_tids; + A_UINT32 tqm_inactive_tids; + A_UINT32 tqm_active_msduq_flows; } htt_tx_tqm_cmn_stats_tlv; typedef struct { @@ -3589,6 +3596,8 @@ typedef struct { #define HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4 #define 
HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5 #define HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4 +#define HTT_RX_PDEV_STATS_TOTAL_BW_COUNTERS \ + (HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS + HTT_RX_PDEV_STATS_NUM_BW_COUNTERS) #define HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 #define HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS 8 #define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT @@ -3894,6 +3903,15 @@ typedef struct { /* Stats for MCS 12/13 */ A_UINT32 ul_mumimo_rx_mcs_ext[HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; A_UINT32 ul_mumimo_rx_gi_ext[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; + + /* RSSI in dBm for Rx TB PPDUs */ + A_INT8 rx_ul_mumimo_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS][HTT_RX_PDEV_STATS_TOTAL_BW_COUNTERS]; + /* Target RSSI stats for UL MUMIMO triggers. Units dBm */ + A_INT8 rx_ul_mumimo_target_rssi[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS][HTT_RX_PDEV_STATS_NUM_BW_COUNTERS]; + /* FD RSSI stats for UL TB PPDUs. Units dBm */ + A_INT8 rx_ul_mumimo_fd_rssi[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS][HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS]; + /* Pilot EVM Stats */ + A_INT8 rx_ulmumimo_pilot_evm_dB_mean[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS][HTT_RX_PDEV_STATS_ULMUMIMO_NUM_SPATIAL_STREAMS]; } htt_rx_pdev_ul_mumimo_trig_stats_tlv; /* STATS_TYPE : HTT_DBG_EXT_STATS_PDEV_UL_MUMIMO_TRIG_STATS @@ -4728,6 +4746,7 @@ typedef struct { #define HTT_LATENCY_PROFILE_MAX_HIST 3 #define HTT_STATS_MAX_PROF_STATS_NAME_LEN 32 +#define HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST 3 typedef struct { htt_tlv_hdr_t tlv_hdr; /* print_header: @@ -4753,6 +4772,20 @@ typedef struct { */ A_UINT32 hist_intvl; A_UINT32 hist[HTT_LATENCY_PROFILE_MAX_HIST]; + A_UINT32 page_fault_max; /* max page faults in any 1 sampling window */ + A_UINT32 page_fault_total; /* summed over all sampling windows */ + /* ignored_latency_count: + * ignore some of profile latency to avoid avg skewing + */ + A_UINT32 ignored_latency_count; + /* 
interrupts_max: max interrupts within any single sampling window */ + A_UINT32 interrupts_max; + /* interrupts_hist: histogram of interrupt rate + * bin0 contains the number of sampling windows that had 0 interrupts, + * bin1 contains the number of sampling windows that had 1-4 interrupts, + * bin2 contains the number of sampling windows that had > 4 interrupts + */ + A_UINT32 interrupts_hist[HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST]; } htt_latency_prof_stats_tlv; typedef struct { diff --git a/drivers/staging/fw-api/fw/wmi_services.h b/drivers/staging/fw-api/fw/wmi_services.h index c6acdd985333..b2b02cb3c1c5 100644 --- a/drivers/staging/fw-api/fw/wmi_services.h +++ b/drivers/staging/fw-api/fw/wmi_services.h @@ -442,6 +442,7 @@ typedef enum { WMI_SERVICE_5_DOT_9GHZ_SUPPORT = 247, /* Indicates FW supports new 5.9GHZ (scan, connection and so on) */ WMI_SERVICE_MU_PREAMBLE_PUNCTURE_SUPPORT = 248, /* Indicates FW supports MU preamble puncture */ WMI_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249, /* Support for SRG, SRP based spatial reuse support */ + WMI_REQUEST_CTRL_PATH_STATS_REQUEST = 250, /* FW supports control path stats */ /******* ADD NEW SERVICES UP TO 256 HERE *******/ diff --git a/drivers/staging/fw-api/fw/wmi_tlv_defs.h b/drivers/staging/fw-api/fw/wmi_tlv_defs.h index 9d3a5a270faa..c9bdc6fbd861 100644 --- a/drivers/staging/fw-api/fw/wmi_tlv_defs.h +++ b/drivers/staging/fw-api/fw/wmi_tlv_defs.h @@ -1076,6 +1076,11 @@ typedef enum { WMITLV_TAG_STRUC_wmi_pdev_non_srg_obss_color_enable_bitmap_cmd_fixed_param, WMITLV_TAG_STRUC_wmi_pdev_non_srg_obss_bssid_enable_bitmap_cmd_fixed_param, WMITLV_TAG_STRUC_wmi_roam_capability_report_event_fixed_param, + WMITLV_TAG_STRUC_wmi_pmf_bcn_protect_stats, + WMITLV_TAG_STRUC_wmi_nan_capabilities, + WMITLV_TAG_STRUC_wmi_request_ctrl_path_stats_cmd_fixed_param, + WMITLV_TAG_STRUC_wmi_ctrl_path_stats_event_fixed_param, + WMITLV_TAG_STRUC_wmi_ctrl_path_pdev_stats_struct, } WMITLV_TAG_ID; /* @@ -1520,6 +1525,7 @@ typedef enum { 
OP(WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID) \ OP(WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID) \ OP(WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID) \ + OP(WMI_REQUEST_CTRL_PATH_STATS_CMDID) \ /* add new CMD_LIST elements above this line */ @@ -1770,6 +1776,7 @@ typedef enum { OP(WMI_AUDIO_AGGR_REPORT_STATISTICS_EVENTID) \ OP(WMI_PDEV_SSCAN_FW_PARAM_EVENTID) \ OP(WMI_ROAM_CAPABILITY_REPORT_EVENTID) \ + OP(WMI_CTRL_PATH_STATS_EVENTID) \ /* add new EVT_LIST elements above this line */ @@ -3988,6 +3995,14 @@ WMITLV_CREATE_PARAM_STRUC(WMI_REQUEST_WLAN_STATS_CMDID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_request_peer_stats_info_cmd_fixed_param, wmi_request_peer_stats_info_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) WMITLV_CREATE_PARAM_STRUC(WMI_REQUEST_PEER_STATS_INFO_CMDID); +/* Request Control Path stats info cmd */ +#define WMITLV_TABLE_WMI_REQUEST_CTRL_PATH_STATS_CMDID(id,op,buf,len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_request_ctrl_path_stats_cmd_fixed_param, wmi_request_ctrl_path_stats_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)\ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, pdev_ids, WMITLV_SIZE_VAR)\ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, vdev_ids, WMITLV_SIZE_VAR)\ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_mac_addr, mac_addr_list, WMITLV_SIZE_VAR) +WMITLV_CREATE_PARAM_STRUC(WMI_REQUEST_CTRL_PATH_STATS_CMDID); + /* Host sets the current country code */ #define WMITLV_TABLE_WMI_SET_CURRENT_COUNTRY_CMDID(id,op,buf,len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_set_current_country_cmd_fixed_param, wmi_set_current_country_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX) @@ -4458,7 +4473,8 @@ WMITLV_CREATE_PARAM_STRUC(WMI_SERVICE_READY_EXT_EVENTID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, WMI_DMA_RING_CAPABILITIES, dma_ring_caps, WMITLV_SIZE_VAR) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_spectral_bin_scaling_params, 
wmi_bin_scaling_params, WMITLV_SIZE_VAR) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, WMI_MAC_PHY_CAPABILITIES_EXT, mac_phy_caps, WMITLV_SIZE_VAR) \ - WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, WMI_HAL_REG_CAPABILITIES_EXT2, hal_reg_caps, WMITLV_SIZE_VAR) + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, WMI_HAL_REG_CAPABILITIES_EXT2, hal_reg_caps, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_nan_capabilities, wmi_nan_capabilities, nan_cap, WMITLV_SIZE_FIX) WMITLV_CREATE_PARAM_STRUC(WMI_SERVICE_READY_EXT2_EVENTID); #define WMITLV_TABLE_WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID(id,op,buf,len) \ @@ -4916,7 +4932,8 @@ WMITLV_CREATE_PARAM_STRUC(WMI_HOST_SWFDA_EVENTID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_per_chain_rssi_stats, wmi_per_chain_rssi_stats, chain_stats, WMITLV_SIZE_FIX) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_rssi_stats, rssi_stats, WMITLV_SIZE_VAR) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_congestion_stats, congestion_stats, WMITLV_SIZE_VAR) \ - WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_extd2_stats, peer_extd2_stats, WMITLV_SIZE_VAR) + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_extd2_stats, peer_extd2_stats, WMITLV_SIZE_VAR) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_pmf_bcn_protect_stats, pmf_bcn_protect_stats, WMITLV_SIZE_VAR) WMITLV_CREATE_PARAM_STRUC(WMI_UPDATE_STATS_EVENTID); /* Update PN response Event */ @@ -5721,6 +5738,12 @@ WMITLV_CREATE_PARAM_STRUC(WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID); WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_peer_stats_info, peer_stats_info, WMITLV_SIZE_VAR) WMITLV_CREATE_PARAM_STRUC(WMI_PEER_STATS_INFO_EVENTID); +/* Update Control Path stats event */ +#define WMITLV_TABLE_WMI_CTRL_PATH_STATS_EVENTID(id, op, buf, len) \ + WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_ctrl_path_stats_event_fixed_param, wmi_ctrl_path_stats_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ 
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_ctrl_path_pdev_stats_struct, ctrl_path_pdev_stats, WMITLV_SIZE_VAR) +WMITLV_CREATE_PARAM_STRUC(WMI_CTRL_PATH_STATS_EVENTID); + #define WMITLV_TABLE_WMI_RADIO_CHAN_STATS_EVENTID(id, op, buf, len) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_radio_chan_stats_event_fixed_param, wmi_radio_chan_stats_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_radio_chan_stats, radio_chan_stats, WMITLV_SIZE_VAR) diff --git a/drivers/staging/fw-api/fw/wmi_unified.h b/drivers/staging/fw-api/fw/wmi_unified.h index c0bf6382701d..2b9c0821121b 100644 --- a/drivers/staging/fw-api/fw/wmi_unified.h +++ b/drivers/staging/fw-api/fw/wmi_unified.h @@ -914,6 +914,9 @@ typedef enum { /** request for WLM (wlan latency manager) stats */ WMI_REQUEST_WLM_STATS_CMDID, + /** request for control path stats */ + WMI_REQUEST_CTRL_PATH_STATS_CMDID, + /** ARP OFFLOAD REQUEST*/ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_ARP_NS_OFL), @@ -1732,6 +1735,10 @@ typedef enum { * and report WLM (WLAN latency manager) stats info to host */ WMI_WLM_STATS_EVENTID, + /** This event is used to respond to WMI_REQUEST_CTRL_PATH_STATS_CMDID + * and report stats info to host */ + WMI_CTRL_PATH_STATS_EVENTID, + /* NLO specific events */ /** NLO match event after the first match */ @@ -3651,6 +3658,13 @@ typedef struct { * for any EMA VAP on any pdev. */ A_UINT32 ema_max_profile_period; + /** @brief max_ndp_sessions + * This is the max ndp sessions sent by the host which is the minimum + * of the value requested within the host's ini configurations and + * the max ndp sessions supported by the firmware (as reported in the + * SERVICE_READY_EXT2_EVENT message). 
+ */ + A_UINT32 max_ndp_sessions; } wmi_resource_config; #define WMI_MSDU_FLOW_AST_ENABLE_GET(msdu_flow_config0, ast_x) \ @@ -6818,6 +6832,23 @@ typedef enum { #define WMI_PDEV_LSIG_LEN_DURATION_GET(lsig_len) WMI_GET_BITS(lsig_len, 0, 30) #define WMI_PDEV_LSIG_LEN_DURATION_SET(lsig_len, value) WMI_SET_BITS(lsig_len, 0, 30, value) +#define WMI_PDEV_IS_NON_SRG_ENABLED(pd_threshold_cfg) WMI_GET_BITS(pd_threshold_cfg, 31, 1) +#define WMI_PDEV_NON_SRG_ENABLE(pd_threshold_cfg) WMI_SET_BITS(pd_threshold_cfg, 31, 1, 1) +#define WMI_PDEV_NON_SRG_DISABLE(pd_threshold_cfg) WMI_SET_BITS(pd_threshold_cfg, 31, 1, 0) +#define WMI_PDEV_NON_SRG_PD_THRESHOLD_SET(pd_threshold_cfg, value) WMI_SET_BITS(pd_threshold_cfg, 0, 8, value) +#define WMI_PDEV_NON_SRG_PD_THRESHOLD_GET(pd_threshold_cfg) WMI_GET_BITS(pd_threshold_cfg, 0, 8) + +#define WMI_PDEV_IS_SRG_ENABLED(pd_threshold_cfg) WMI_GET_BITS(pd_threshold_cfg, 30, 1) +#define WMI_PDEV_SRG_ENABLE(pd_threshold_cfg) WMI_SET_BITS(pd_threshold_cfg, 30, 1, 1) +#define WMI_PDEV_SRG_DISABLE(pd_threshold_cfg) WMI_SET_BITS(pd_threshold_cfg, 30, 1, 0) +#define WMI_PDEV_SRG_PD_THRESHOLD_SET(pd_threshold_cfg, value) WMI_SET_BITS(pd_threshold_cfg, 8, 8, value) +#define WMI_PDEV_SRG_PD_THRESHOLD_GET(pd_threshold_cfg) WMI_GET_BITS(pd_threshold_cfg, 8, 8) + +#define WMI_PDEV_OBSS_PD_ENABLE_PER_AC_SET(per_ac_cfg, value) WMI_SET_BITS(per_ac_cfg, 0, 4, value) + #define WMI_PDEV_OBSS_PD_ENABLE_PER_AC_GET(per_ac_cfg) WMI_GET_BITS(per_ac_cfg, 0, 4) +#define WMI_PDEV_SRP_ENABLE_PER_AC_SET(per_ac_cfg, value) WMI_SET_BITS(per_ac_cfg, 16, 4, value) + #define WMI_PDEV_SRP_ENABLE_PER_AC_GET(per_ac_cfg) WMI_GET_BITS(per_ac_cfg, 16, 4) + typedef struct { A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_pdev_set_param_cmd_fixed_param */ /** pdev_id for identifying the MAC @@ -7533,21 +7564,22 @@ typedef struct { } wmi_pdev_set_wmm_params_cmd_fixed_param; typedef enum { - WMI_REQUEST_PEER_STAT = 0x0001, - WMI_REQUEST_AP_STAT = 0x0002, - 
WMI_REQUEST_PDEV_STAT = 0x0004, - WMI_REQUEST_VDEV_STAT = 0x0008, - WMI_REQUEST_BCNFLT_STAT = 0x0010, - WMI_REQUEST_VDEV_RATE_STAT = 0x0020, - WMI_REQUEST_INST_STAT = 0x0040, - WMI_REQUEST_MIB_STAT = 0x0080, - WMI_REQUEST_RSSI_PER_CHAIN_STAT = 0x0100, - WMI_REQUEST_CONGESTION_STAT = 0x0200, - WMI_REQUEST_PEER_EXTD_STAT = 0x0400, - WMI_REQUEST_BCN_STAT = 0x0800, - WMI_REQUEST_BCN_STAT_RESET = 0x1000, - WMI_REQUEST_PEER_EXTD2_STAT = 0x2000, - WMI_REQUEST_MIB_EXTD_STAT = 0x4000, + WMI_REQUEST_PEER_STAT = 0x0001, + WMI_REQUEST_AP_STAT = 0x0002, + WMI_REQUEST_PDEV_STAT = 0x0004, + WMI_REQUEST_VDEV_STAT = 0x0008, + WMI_REQUEST_BCNFLT_STAT = 0x0010, + WMI_REQUEST_VDEV_RATE_STAT = 0x0020, + WMI_REQUEST_INST_STAT = 0x0040, + WMI_REQUEST_MIB_STAT = 0x0080, + WMI_REQUEST_RSSI_PER_CHAIN_STAT = 0x0100, + WMI_REQUEST_CONGESTION_STAT = 0x0200, + WMI_REQUEST_PEER_EXTD_STAT = 0x0400, + WMI_REQUEST_BCN_STAT = 0x0800, + WMI_REQUEST_BCN_STAT_RESET = 0x1000, + WMI_REQUEST_PEER_EXTD2_STAT = 0x2000, + WMI_REQUEST_MIB_EXTD_STAT = 0x4000, + WMI_REQUEST_PMF_BCN_PROTECT_STAT = 0x8000, } wmi_stats_id; /* @@ -8268,6 +8300,10 @@ typedef struct { * num_mib_extd_stats * size of(struct wmi_mib_extd_stats) * following the information elements listed above. 
*/ +/* If WMI_REQUEST_PMF_BCN_PROTECT_STAT is set in stats_id, then TLV + * wmi_pmf_bcn_protect_stats pmf_bcn_protect_stats[] + * follows the other TLVs + */ } wmi_stats_event_fixed_param; /* WLAN channel CCA stats bitmap */ @@ -8828,6 +8864,73 @@ typedef struct { */ } wmi_peer_stats_info_event_fixed_param; +/** + * WMI arrays of length WMI_MGMT_FRAME_SUBTYPE_MAX use the + * IEEE802.11 standard's enumeration of mgmt frame subtypes: + * 0 -> IEEE80211_FC0_SUBTYPE_ASSOC_REQ + * 1 -> IEEE80211_FC0_SUBTYPE_ASSOC_RESP + * 2 -> IEEE80211_FC0_SUBTYPE_REASSOC_REQ + * 3 -> IEEE80211_FC0_SUBTYPE_REASSOC_RESP + * 4 -> IEEE80211_FC0_SUBTYPE_PROBE_REQ + * 5 -> IEEE80211_FC0_SUBTYPE_PROBE_RESP + * 6 -> Reserved + * 7 -> Reserved + * 8 -> IEEE80211_FC0_SUBTYPE_BEACON + * 9 -> IEEE80211_FC0_SUBTYPE_ATIM + * 10 -> IEEE80211_FC0_SUBTYPE_DISASSOC + * 11 -> IEEE80211_FC0_SUBTYPE_AUTH + * 12 -> IEEE80211_FC0_SUBTYPE_DEAUTH + * 13 -> IEEE80211_FCO_SUBTYPE_ACTION + * 14 -> IEEE80211_FC0_SUBTYPE_ACTION_NOACK + * 15 -> IEEE80211_FC0_SUBTYPE_RESERVED + */ +#define WMI_MGMT_FRAME_SUBTYPE_MAX 16 + +typedef struct { + /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_ctrl_path_pdev_stats_struct*/ + A_UINT32 tlv_header; + /** pdev_id for identifying the MAC */ + A_UINT32 pdev_id; + /** counter of how many times this pdev has + * transmitted each management frame sub-type */ + A_UINT32 tx_mgmt_subtype[WMI_MGMT_FRAME_SUBTYPE_MAX]; + /** counter of how many times this pdev has + * received each management frame sub-type */ + A_UINT32 rx_mgmt_subtype[WMI_MGMT_FRAME_SUBTYPE_MAX]; + /** scan fail dfs violation time in ms */ + A_UINT32 scan_fail_dfs_violation_time_ms; + /** NOL check failed latest channel frequency in MHz */ + A_UINT32 nol_check_fail_last_chan_freq; + /** NOL check failed timestamp in ms */ + A_UINT32 nol_check_fail_time_stamp_ms; + /** total peer create count */ + A_UINT32 total_peer_create_cnt; + /** total peer delete count */ + A_UINT32 total_peer_delete_cnt; + /** total peer 
delete response count */ + A_UINT32 total_peer_delete_resp_cnt; + /** sched algo FIFO full count */ + A_UINT32 vdev_pause_fail_rt_to_sched_algo_fifo_full_cnt; +} wmi_ctrl_path_pdev_stats_struct; + +typedef struct { + /** TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_ctrl_path_stats_event_fixed_param */ + A_UINT32 tlv_header; + /** Request ID*/ + A_UINT32 request_id; + /** more flag + * 1 - More events sent after this event. + * 0 - no more events after this event. + */ + A_UINT32 more; + /** This TLV is (optionally) followed by TLV arrays containing + * different types of stats: + * 1. wmi_ctrl_path_pdev_stats_struct ctrl_path_pdev_stats[]; + * This TLV array contains zero or more pdev stats instances. + */ +} wmi_ctrl_path_stats_event_fixed_param; + typedef struct { /** TLV tag and len; tag equals * WMITLV_TAG_STRUC_wmi_radio_chan_stats */ @@ -9122,6 +9225,17 @@ typedef struct { A_UINT32 reserved[8]; /* Reserve more fields for future extension */ } wmi_mib_extd_stats; +/** + * Beacon protection statistics. + */ +typedef struct { + A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_pmf_bcn_protect_stats */ + A_UINT32 igtk_mic_fail_cnt; /* MIC failure count of management packets using IGTK */ + A_UINT32 igtk_replay_cnt; /* Replay detection count of management packets using IGTK */ + A_UINT32 bcn_mic_fail_cnt; /* MIC failure count of beacon packets using BIGTK */ + A_UINT32 bcn_replay_cnt; /* Replay detection count of beacon packets using BIGTK */ +} wmi_pmf_bcn_protect_stats; + typedef struct { A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_rssi_stats */ A_UINT32 vdev_id; @@ -9974,14 +10088,14 @@ typedef struct { A_UINT32 profile_idx; /** the total profile numbers of non-trans aps (mbssid case). 
0 means legacy AP */ A_UINT32 profile_num; + /** flags - this is a bitwise-or combination of WMI_VDEV_UP_FLAGS values */ + A_UINT32 flags; } wmi_vdev_up_cmd_fixed_param; typedef struct { A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_vdev_stop_cmd_fixed_param */ /** unique id identifying the VDEV, generated by the caller */ A_UINT32 vdev_id; - /** flags - this is a bitwise-or combination of WMI_VDEV_UP_FLAGS values */ - A_UINT32 flags; } wmi_vdev_stop_cmd_fixed_param; typedef struct { @@ -10907,7 +11021,7 @@ typedef enum { * take effect if the WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD * setting is also set for the pdev that the vdev belongs to. */ - WMI_VDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD, + WMI_VDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD, /* 0x9E */ /* Parameter used to configure OBSS Packet Detection per Access Category * for SRP based and OBSS_PD based spatial reuse feature. @@ -10931,7 +11045,14 @@ typedef enum { * if the WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC setting is also set for * the pdev that the vdev belongs to. */ - WMI_VDEV_PARAM_SET_CMD_OBSS_PD_PER_AC, + WMI_VDEV_PARAM_SET_CMD_OBSS_PD_PER_AC, /* 0x9F */ + + /** + * VDEV parameter to indicate RSN (Robust Security Network) capability. + * This value will be intersection of the local vdev's (STA's) + * RSN capability and the peer's (AP's) RSN capability. 
+ */ + WMI_VDEV_PARAM_RSN_CAPABILITY, /* 0xA0 */ /* Parameter used to enable/disable SRP feature */ WMI_VDEV_PARAM_ENABLE_SRP, @@ -13513,7 +13634,8 @@ typedef struct { #define WLAN_ROAM_SCORE_BAND_2G_INDEX 0 #define WLAN_ROAM_SCORE_BAND_5G_INDEX 1 -/* 2 and 3 are reserved */ +#define WLAN_ROAM_SCORE_BAND_6G_INDEX 2 +/* 3 is reserved */ #define WLAN_ROAM_SCORE_MAX_BAND_INDEX 4 #define WMI_ROAM_GET_BAND_SCORE_PERCENTAGE(value32, band_index) WMI_GET_BITS(value32, (8 * (band_index)), 8) #define WMI_ROAM_SET_BAND_SCORE_PERCENTAGE(value32, score_pcnt, band_index) WMI_SET_BITS(value32, (8 * (band_index)), 8, score_pcnt) @@ -16099,6 +16221,10 @@ typedef struct { A_UINT8 gtk_keyLength; /* GTK key length */ A_UINT8 gtk_keyRSC[GTK_REPLAY_COUNTER_BYTES]; /* GTK key replay sequence counter */ A_UINT8 gtk_key[WMI_MAX_KEY_LEN]; /* GTK key data */ + A_UINT8 bigtk_keyIndex; /* Use if IGTK_OFFLOAD is defined */ + A_UINT8 bigtk_keyLength; /* Use if IGTK_OFFLOAD is defined */ + A_UINT8 bigtk_keyRSC[IGTK_PN_SIZE]; /* key replay sequence counter *//* Use if IGTK_OFFLOAD is defined */ + A_UINT8 bigtk_key[WMI_MAX_KEY_LEN]; /* Use if IGTK_OFFLOAD is defined */ } WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param; typedef struct { @@ -18988,6 +19114,13 @@ typedef struct { */ } wmi_nan_dmesg_event_fixed_param; +typedef struct { + /** TLV tag and len; tag equals WMITLV_TAG_STRUCT_wmi_nan_capabilities */ + A_UINT32 tlv_header; + /** Maximum number of ndp sessions supported by the Firmware */ + A_UINT32 max_ndp_sessions; +} wmi_nan_capabilities; + /** NAN DATA CMD's */ /** @@ -24616,6 +24749,55 @@ typedef struct { A_UINT32 reset_after_request; } wmi_request_peer_stats_info_cmd_fixed_param; +typedef enum { + /* + * Multiple stats type can be requested together, so each value + * within this enum represents a bit within a stats bitmap. + */ + WMI_REQUEST_CTRL_PATH_PDEV_TX_STAT = 0x00000001, +} wmi_ctrl_path_stats_id; + +typedef enum { + /* + * The following stats actions are mutually exclusive. 
+ * A single stats request message can only specify one action. + */ + WMI_REQUEST_CTRL_PATH_STAT_GET = 1, + WMI_REQUEST_CTRL_PATH_STAT_RESET = 2, + WMI_REQUEST_CTRL_PATH_STAT_START = 3, + WMI_REQUEST_CTRL_PATH_STAT_STOP = 4, +} wmi_ctrl_path_stats_action; + +typedef struct { + /** TLV tag and len; tag equals + * WMITLV_TAG_STRUC_wmi_request_ctrl_path_stats_cmd_fixed_param */ + A_UINT32 tlv_header; + /** Bitmask showing which of stats IDs 0-31 have been requested. + * These stats ids are defined in enum wmi_ctrl_path_stats_id. + */ + A_UINT32 stats_id_mask; + /** request ID to store the cookies in wifistats */ + A_UINT32 request_id; + /** action + * get/reset/start/stop based on stats id + * defined as a part of wmi_ctrl_path_stats_action + **/ + A_UINT32 action; /* refer to wmi_ctrl_path_stats_action */ + + /** The below TLV arrays optionally follow this fixed_param TLV structure: + * 1. A_UINT32 pdev_ids[]; + * If this array is present and non-zero length, stats should only + * be provided from the pdevs identified in the array. + * 2. A_UINT32 vdev_ids[]; + * If this array is present and non-zero length, stats should only + * be provided from the vdevs identified in the array. + * 3. wmi_mac_addr peer_macaddr[]; + * If this array is present and non-zero length, stats should only + * be provided from the peers with the MAC addresses specified + * in the array. 
+ */ +} wmi_request_ctrl_path_stats_cmd_fixed_param; + typedef enum { WMI_REQUEST_ONE_RADIO_CHAN_STATS = 0x01, /* request stats of one specified channel */ WMI_REQUEST_ALL_RADIO_CHAN_STATS = 0x02, /* request stats of all channels */ @@ -26131,6 +26313,7 @@ static INLINE A_UINT8 *wmi_id_to_name(A_UINT32 wmi_command) WMI_RETURN_STRING(WMI_ANT_CONTROLLER_CMDID); WMI_RETURN_STRING(WMI_SIMULATION_TEST_CMDID); WMI_RETURN_STRING(WMI_AUDIO_AGGR_SET_RTSCTS_CONFIG_CMDID); + WMI_RETURN_STRING(WMI_REQUEST_CTRL_PATH_STATS_CMDID); } return "Invalid WMI cmd"; diff --git a/drivers/staging/fw-api/fw/wmi_version.h b/drivers/staging/fw-api/fw/wmi_version.h index 737285e33b20..320431308605 100644 --- a/drivers/staging/fw-api/fw/wmi_version.h +++ b/drivers/staging/fw-api/fw/wmi_version.h @@ -36,7 +36,7 @@ #define __WMI_VER_MINOR_ 0 /** WMI revision number has to be incremented when there is a * change that may or may not break compatibility. */ -#define __WMI_REVISION_ 836 +#define __WMI_REVISION_ 843 /** The Version Namespace should not be normally changed. 
Only * host and firmware of the same WMI namespace will work diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c index bd52a9ed88e9..1f18e2a63446 100644 --- a/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c +++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c @@ -22,7 +22,7 @@ #include /* qdf_nbuf_t */ /* HTC Control message receive timeout msec */ -#define HTC_CONTROL_RX_TIMEOUT 3000 +#define HTC_CONTROL_RX_TIMEOUT 6000 #if defined(WLAN_DEBUG) || defined(DEBUG) void debug_dump_bytes(uint8_t *buffer, uint16_t length, char *pDescription) diff --git a/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c index 453571bf65ca..caabb23b460a 100644 --- a/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c +++ b/drivers/staging/qca-wifi-host-cmn/os_if/linux/scan/src/wlan_cfg80211_scan.c @@ -1640,8 +1640,10 @@ QDF_STATUS wlan_abort_scan(struct wlan_objmgr_pdev *pdev, req->cancel_req.scan_id = scan_id; req->cancel_req.pdev_id = pdev_id; req->cancel_req.vdev_id = vdev_id; - if (scan_id != INVAL_SCAN_ID) + if (scan_id != INVAL_SCAN_ID && scan_id != CANCEL_HOST_SCAN_ID) req->cancel_req.req_type = WLAN_SCAN_CANCEL_SINGLE; + else if (scan_id == CANCEL_HOST_SCAN_ID) + req->cancel_req.req_type = WLAN_SCAN_CANCEL_HOST_VDEV_ALL; else if (vdev_id == INVAL_VDEV_ID) req->cancel_req.req_type = WLAN_SCAN_CANCEL_PDEV_ALL; else diff --git a/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c index f22a62036bb1..164554dc9367 100644 --- a/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c +++ b/drivers/staging/qca-wifi-host-cmn/target_if/cp_stats/src/target_if_mc_cp_stats.c @@ -222,6 +222,10 @@ static QDF_STATUS target_if_cp_stats_extract_vdev_summary_stats( dat_snr = 
vdev_stats.vdev_snr.dat_snr; ev->vdev_summary_stats[i].vdev_id = vdev_stats.vdev_id; + cp_stats_debug("vdev %d SNR bcn: %d data: %d", + ev->vdev_summary_stats[i].vdev_id, bcn_snr, + dat_snr); + for (j = 0; j < 4; j++) { ev->vdev_summary_stats[i].stats.tx_frm_cnt[j] = vdev_stats.tx_frm_cnt[j]; diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h index 392ecdaf96e8..99af2a7fbc1b 100644 --- a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/inc/wlan_serialization_api.h @@ -169,12 +169,14 @@ enum wlan_serialization_cmd_type { * @WLAN_SER_CANCEL_SINGLE_SCAN: Cancel a single scan with a given ID * @WLAN_SER_CANCEL_PDEV_SCANS: Cancel all the scans on a given pdev * @WLAN_SER_CANCEL_VDEV_SCANS: Cancel all the scans on given vdev + * @WLAN_SER_CANCEL_VDEV_HOST_SCANS: Cancel all host scans on given vdev * @WLAN_SER_CANCEL_NON_SCAN_CMD: Cancel the given non scan command */ enum wlan_serialization_cancel_type { WLAN_SER_CANCEL_SINGLE_SCAN, WLAN_SER_CANCEL_PDEV_SCANS, WLAN_SER_CANCEL_VDEV_SCANS, + WLAN_SER_CANCEL_VDEV_HOST_SCANS, WLAN_SER_CANCEL_NON_SCAN_CMD, WLAN_SER_CANCEL_MAX, }; diff --git a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_dequeue.c b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_dequeue.c index 10602f21ef39..f82a1749ff32 100644 --- a/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_dequeue.c +++ b/drivers/staging/qca-wifi-host-cmn/umac/cmn_services/serialization/src/wlan_serialization_dequeue.c @@ -623,6 +623,7 @@ wlan_serialization_find_and_cancel_cmd( NULL, cmd.cmd_type); break; case WLAN_SER_CANCEL_VDEV_SCANS: + case WLAN_SER_CANCEL_VDEV_HOST_SCANS: /* remove all scan 
cmds which matches the vdev object */ status = wlan_serialization_cmd_cancel_handler(ser_obj, NULL, NULL, diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c index db535b3c6efb..0611c31d9d78 100644 --- a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_filter.c @@ -1148,6 +1148,10 @@ bool scm_filter_match(struct wlan_objmgr_psoc *psoc, if (!def_param) return false; + if (filter->age_threshold && + filter->age_threshold < util_scan_entry_age(db_entry)) + return false; + roam_params = &def_param->roam_params; if (filter->p2p_results && !db_entry->is_p2p) diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c index 11261c1060f9..984b4f6e0dea 100644 --- a/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/core/src/wlan_scan_manager.c @@ -524,6 +524,9 @@ get_serialization_cancel_type(enum scan_cancel_req_type type) case WLAN_SCAN_CANCEL_PDEV_ALL: serialization_type = WLAN_SER_CANCEL_PDEV_SCANS; break; + case WLAN_SCAN_CANCEL_HOST_VDEV_ALL: + serialization_type = WLAN_SER_CANCEL_VDEV_HOST_SCANS; + break; default: QDF_ASSERT(0); scm_warn("invalid scan_cancel_req_type: %d", type); diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_public_structs.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_public_structs.h index 6a369610f348..d3ba67f40013 100644 --- a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_public_structs.h +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_public_structs.h @@ -40,6 +40,7 @@ typedef uint32_t wlan_scan_id; #define SCM_CANCEL_SCAN_WAIT_ITERATION 600 #define INVAL_SCAN_ID 
0xFFFFFFFF +#define CANCEL_HOST_SCAN_ID 0xFFFFFFFE #define INVAL_VDEV_ID 0xFFFFFFFF #define INVAL_PDEV_ID 0xFFFFFFFF @@ -558,7 +559,7 @@ struct fils_filter_info { struct scan_filter { bool bss_scoring_required; bool enable_adaptive_11r; - uint32_t age_threshold; + qdf_time_t age_threshold; uint32_t p2p_results; uint32_t rrm_measurement_filter; uint32_t num_of_bssid; @@ -959,12 +960,15 @@ struct scan_start_request { * enum scan_cancel_type - type specifiers for cancel scan request * @WLAN_SCAN_CANCEL_SINGLE: cancel particular scan specified by scan_id * @WLAN_SCAN_CANCEL_VAP_ALL: cancel all scans running on a particular vdevid - * WLAN_SCAN_CANCEL_PDEV_ALL: cancel all scans running on parent pdev of vdevid + * @WLAN_SCAN_CANCEL_PDEV_ALL: cancel all scans running on parent pdev of vdevid + * @WLAN_SCAN_CANCEL_HOST_VDEV_ALL: Cancel all host triggered scans alone on + * vdev */ enum scan_cancel_req_type { WLAN_SCAN_CANCEL_SINGLE = 1, WLAN_SCAN_CANCEL_VDEV_ALL, WLAN_SCAN_CANCEL_PDEV_ALL, + WLAN_SCAN_CANCEL_HOST_VDEV_ALL, }; /** diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h index ac87f2f61b7a..9bec549eb387 100644 --- a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/inc/wlan_scan_ucfg_api.h @@ -656,4 +656,12 @@ bool ucfg_scan_wake_lock_in_user_scan(struct wlan_objmgr_psoc *psoc); * Return: Max Scan commands allowed count */ uint32_t ucfg_scan_get_max_cmd_allowed(void); + +/** + * wlan_scan_get_aging_time - Get the scan aging time config + * @psoc: psoc context + * + * Return: Scan aging time config + */ +qdf_time_t ucfg_scan_get_aging_time(struct wlan_objmgr_psoc *psoc); #endif diff --git a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c 
index 589c20dbf446..734e75f4e3a2 100644 --- a/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c +++ b/drivers/staging/qca-wifi-host-cmn/umac/scan/dispatcher/src/wlan_scan_ucfg_api.c @@ -1284,11 +1284,10 @@ ucfg_scan_cancel_sync(struct scan_cancel_request *req) return QDF_STATUS_E_NULL_VALUE; } - if (req->cancel_req.req_type == - WLAN_SCAN_CANCEL_PDEV_ALL) + if (req->cancel_req.req_type == WLAN_SCAN_CANCEL_PDEV_ALL) cancel_pdev = true; - else if (req->cancel_req.req_type == - WLAN_SCAN_CANCEL_VDEV_ALL) + else if (req->cancel_req.req_type == WLAN_SCAN_CANCEL_VDEV_ALL || + req->cancel_req.req_type == WLAN_SCAN_CANCEL_HOST_VDEV_ALL) cancel_vdev = true; vdev = req->vdev; @@ -2531,3 +2530,14 @@ uint32_t ucfg_scan_get_max_cmd_allowed(void) { return MAX_SCAN_COMMANDS; } + +qdf_time_t ucfg_scan_get_aging_time(struct wlan_objmgr_psoc *psoc) +{ + struct wlan_scan_obj *scan_obj; + + scan_obj = wlan_psoc_get_scan_obj(psoc); + if (!scan_obj) + return SCAN_CACHE_AGING_TIME; + + return scan_obj->scan_def.scan_cache_aging_time; +} diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h index e0397cdfe68a..f5a54a46e1d8 100644 --- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h +++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h @@ -6238,6 +6238,8 @@ typedef enum { wmi_service_packet_capture_support, wmi_service_time_sync_ftm, wmi_roam_scan_chan_list_to_host_support, + wmi_service_host_scan_stop_vdev_all, + wmi_service_suiteb_roam_support, wmi_services_max, } wmi_conv_service_ids; #define WMI_SERVICE_UNAVAILABLE 0xFFFF diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c index 1a5e39b18f24..8cbe77cdcaaa 100644 --- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c +++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c @@ -3058,6 +3058,8 @@ 
static QDF_STATUS send_scan_stop_cmd_tlv(wmi_unified_t wmi_handle, } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) { /* Cancelling specific scan */ cmd->req_type = WMI_SCAN_STOP_ONE; + } else if (param->req_type == WLAN_SCAN_CANCEL_HOST_VDEV_ALL) { + cmd->req_type = WMI_SCN_STOP_HOST_VAP_ALL; } else { WMI_LOGE("%s: Invalid Command : ", __func__); wmi_buf_free(wmi_buf); @@ -6773,6 +6775,18 @@ static QDF_STATUS send_roam_scan_offload_mode_cmd_tlv(wmi_unified_t wmi_handle, roam_req->psk_pmk, roam_offload_11i->pmk_len); + if (auth_mode == + WMI_AUTH_RSNA_SUITE_B_8021X_SHA384) { + roam_offload_11i->pmk_ext_len = + (roam_req->pmk_len - + ROAM_OFFLOAD_PMK_BYTES); + qdf_mem_copy(roam_offload_11i->pmk_ext, + &roam_req->psk_pmk[ + ROAM_OFFLOAD_PMK_BYTES], + roam_offload_11i-> + pmk_ext_len); + } + WMITLV_SET_HDR(&roam_offload_11i->tlv_header, WMITLV_TAG_STRUC_wmi_roam_11i_offload_tlv_param, WMITLV_GET_STRUCT_TLVLEN @@ -6787,11 +6801,8 @@ static QDF_STATUS send_roam_scan_offload_mode_cmd_tlv(wmi_unified_t wmi_handle, buf_ptr += WMI_TLV_HDR_SIZE; WMI_LOGD("pmk_len = %d", roam_offload_11i->pmk_len); - if (roam_offload_11i->pmk_len) - QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_WMI, - QDF_TRACE_LEVEL_DEBUG, - roam_offload_11i->pmk, - roam_offload_11i->pmk_len); + WMI_LOGD("pmk_ext_len = %d", + roam_offload_11i->pmk_ext_len); } } else { WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC, @@ -25211,6 +25222,10 @@ static void populate_tlv_service(uint32_t *wmi_service) WMI_SERVICE_AUDIO_SYNC_SUPPORT; wmi_service[wmi_roam_scan_chan_list_to_host_support] = WMI_SERVICE_ROAM_SCAN_CHANNEL_LIST_TO_HOST_SUPPORT; + wmi_service[wmi_service_host_scan_stop_vdev_all] = + WMI_SERVICE_HOST_SCAN_STOP_VDEV_ALL_SUPPORT; + wmi_service[wmi_service_suiteb_roam_support] = + WMI_SERVICE_WPA3_SUITEB_ROAM_SUPPORT; } #ifndef CONFIG_MCL diff --git a/drivers/staging/qcacld-3.0/Kbuild b/drivers/staging/qcacld-3.0/Kbuild index 7515219db76c..0e993dd6d60b 100644 --- a/drivers/staging/qcacld-3.0/Kbuild +++ 
b/drivers/staging/qcacld-3.0/Kbuild @@ -2130,6 +2130,10 @@ ifeq ($(CONFIG_QCA6290_11AX), y) cppflags-y += -DQCA_WIFI_QCA6290_11AX endif +# Enable Low latency optimisation mode +cppflags-$(CONFIG_WLAN_FEATURE_LL_MODE) += -DWLAN_FEATURE_LL_MODE + +cppflags-$(CONFIG_WLAN_CLD_PM_QOS) += -DCLD_PM_QOS cppflags-$(CONFIG_WLAN_FEATURE_11AX) += -DWLAN_FEATURE_11AX cppflags-$(CONFIG_WLAN_FEATURE_11AX) += -DWLAN_FEATURE_11AX_BSS_COLOR diff --git a/drivers/staging/qcacld-3.0/configs/default_defconfig b/drivers/staging/qcacld-3.0/configs/default_defconfig index e1e0925657c7..ef153325d613 100644 --- a/drivers/staging/qcacld-3.0/configs/default_defconfig +++ b/drivers/staging/qcacld-3.0/configs/default_defconfig @@ -298,6 +298,7 @@ CONFIG_DP_LFR := y CONFIG_HTT_PADDR64 := y CONFIG_RX_OL := y CONFIG_TX_TID_OVERRIDE := y +CONFIG_WLAN_CLD_PM_QOS := y endif #Enable WLAN/Power debugfs feature only if debug_fs is enabled diff --git a/drivers/staging/qcacld-3.0/configs/qcs40x.snoc.perf_defconfig b/drivers/staging/qcacld-3.0/configs/qcs40x.snoc.perf_defconfig index 1dd63fcb781b..0452f8be72be 100644 --- a/drivers/staging/qcacld-3.0/configs/qcs40x.snoc.perf_defconfig +++ b/drivers/staging/qcacld-3.0/configs/qcs40x.snoc.perf_defconfig @@ -168,6 +168,11 @@ CONFIG_WLAN_FEATURE_SAE := y CONFIG_GTK_OFFLOAD := y CONFIG_QCACLD_FEATURE_COEX_CONFIG := y CONFIG_QCACLD_FEATURE_MPTA_HELPER := y +CONFIG_WLAN_FEATURE_LL_MODE := y + +ifeq ($(CONFIG_WLAN_FEATURE_LL_MODE), y) + CONFIG_WLAN_CLD_PM_QOS := y +endif ifneq ($(DEVELOPER_DISABLE_BUILD_TIMESTAMP), y) ifneq ($(WLAN_DISABLE_BUILD_TAG), y) diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c index 093b882c60ea..c53d00b4aa41 100644 --- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c +++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c @@ -130,6 +130,78 @@ extern void ol_txrx_get_pn_info(void *ppeer, uint8_t **last_pn_valid, /* thresh for peer's cached buf queue beyond which the 
elements are dropped */ #define OL_TXRX_CACHED_BUFQ_THRESH 128 +#ifdef DP_SUPPORT_RECOVERY_NOTIFY +static +int ol_peer_recovery_notifier_cb(struct notifier_block *block, + unsigned long state, void *data) +{ + struct qdf_notifer_data *notif_data = data; + qdf_notif_block *notif_block; + struct ol_txrx_peer_t *peer; + struct peer_hang_data hang_data; + enum peer_debug_id_type dbg_id; + + if (!data || !block) + return -EINVAL; + + notif_block = qdf_container_of(block, qdf_notif_block, notif_block); + + peer = notif_block->priv_data; + if (!peer) + return -EINVAL; + + if (notif_data->offset >= QDF_WLAN_MAX_HOST_OFFSET) + return NOTIFY_STOP_MASK; + + QDF_HANG_EVT_SET_HDR(&hang_data.tlv_header, + HANG_EVT_TAG_DP_PEER_INFO, + QDF_HANG_GET_STRUCT_TLVLEN(struct peer_hang_data)); + + qdf_mem_copy(&hang_data.peer_mac_addr, &peer->mac_addr.raw, + QDF_MAC_ADDR_SIZE); + + for (dbg_id = 0; dbg_id < PEER_DEBUG_ID_MAX; dbg_id++) + if (qdf_atomic_read(&peer->access_list[dbg_id])) + hang_data.peer_timeout_bitmask |= (1 << dbg_id); + + qdf_mem_copy(notif_data->hang_data + notif_data->offset, + &hang_data, sizeof(struct peer_hang_data)); + notif_data->offset += sizeof(struct peer_hang_data); + + return 0; +} + +static qdf_notif_block ol_peer_recovery_notifier = { + .notif_block.notifier_call = ol_peer_recovery_notifier_cb, +}; + +static +QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer) +{ + ol_peer_recovery_notifier.priv_data = peer; + + return qdf_hang_event_register_notifier(&ol_peer_recovery_notifier); +} + +static +QDF_STATUS ol_unregister_peer_recovery_notifier(void) +{ + return qdf_hang_event_unregister_notifier(&ol_peer_recovery_notifier); +} +#else +static inline +QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer) +{ + return QDF_STATUS_SUCCESS; +} + +static +QDF_STATUS ol_unregister_peer_recovery_notifier(void) +{ + return QDF_STATUS_SUCCESS; +} +#endif + #if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS) /** @@ 
-2393,6 +2465,7 @@ static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force) ol_txrx_pdev_grp_stat_destroy(pdev); ol_txrx_debugfs_exit(pdev); + ol_unregister_peer_recovery_notifier(); qdf_mem_free(pdev); } @@ -3987,63 +4060,6 @@ static QDF_STATUS ol_txrx_clear_peer(struct cdp_pdev *ppdev, uint8_t sta_id) return status; } -#ifdef DP_SUPPORT_RECOVERY_NOTIFY -static -int ol_peer_recovery_notifier_cb(struct notifier_block *block, - unsigned long state, void *data) -{ - struct qdf_notifer_data *notif_data = data; - qdf_notif_block *notif_block; - struct ol_txrx_peer_t *peer; - struct peer_hang_data hang_data; - enum peer_debug_id_type dbg_id; - - if (!data || !block) - return -EINVAL; - - notif_block = qdf_container_of(block, qdf_notif_block, notif_block); - - peer = notif_block->priv_data; - if (!peer) - return -EINVAL; - - QDF_HANG_EVT_SET_HDR(&hang_data.tlv_header, - HANG_EVT_TAG_DP_PEER_INFO, - QDF_HANG_GET_STRUCT_TLVLEN(struct peer_hang_data)); - - qdf_mem_copy(&hang_data.peer_mac_addr, &peer->mac_addr.raw, - QDF_MAC_ADDR_SIZE); - - for (dbg_id = 0; dbg_id < PEER_DEBUG_ID_MAX; dbg_id++) - if (qdf_atomic_read(&peer->access_list[dbg_id])) - hang_data.peer_timeout_bitmask |= (1 << dbg_id); - - qdf_mem_copy(notif_data->hang_data + notif_data->offset, - &hang_data, sizeof(struct peer_hang_data)); - notif_data->offset += sizeof(struct peer_hang_data); - - return 0; -} - -static qdf_notif_block ol_peer_recovery_notifier = { - .notif_block.notifier_call = ol_peer_recovery_notifier_cb, -}; - -static -QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer) -{ - ol_peer_recovery_notifier.priv_data = peer; - - return qdf_hang_event_register_notifier(&ol_peer_recovery_notifier); -} -#else -static inline -QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer) -{ - return QDF_STATUS_SUCCESS; -} -#endif - /** * peer_unmap_timer_handler() - peer unmap timer function * @data: peer object pointer diff --git 
a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h index 0e1a12e7d1cd..7f5bc965daca 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h +++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h @@ -47,6 +47,9 @@ #include #include #include +#ifdef CLD_PM_QOS +#include +#endif #include #include #include @@ -1996,6 +1999,7 @@ struct hdd_context { int radio_index; qdf_work_t sap_pre_cac_work; bool hbw_requested; + bool llm_enabled; uint32_t last_nil_scan_bug_report_timestamp; enum RX_OFFLOAD ol_enable; #ifdef WLAN_FEATURE_NAN_DATAPATH @@ -2102,6 +2106,10 @@ struct hdd_context { qdf_atomic_t sar_safety_req_resp_event_in_progress; #endif bool roam_ch_from_fw_supported; + +#ifdef CLD_PM_QOS + struct pm_qos_request pm_qos_req; +#endif }; /** diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_power.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_power.h index e8675af1ed2e..28af53248396 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_power.h +++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_power.h @@ -33,6 +33,8 @@ #define HDD_WAKELOCK_TIMEOUT_CONNECT 1000 #define HDD_WAKELOCK_TIMEOUT_RESUME 1000 +#define DISABLE_KRAIT_IDLE_PS_VAL 1 + /* * HDD_WAKELOCK_CONNECT_COMPLETE = CSR_JOIN_FAILURE_TIMEOUT_DEFAULT (3000) + * WNI_CFG_AUTHENTICATE_FAILURE_TIMEOUT_STADEF (1000) + diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c index d849ac6dc42a..587a6b1d92dc 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c @@ -3668,7 +3668,7 @@ bool hdd_save_peer(struct hdd_station_ctx *sta_ctx, uint8_t sta_id, { int idx; - for (idx = 0; idx < SIR_MAX_NUM_STA_IN_IBSS; idx++) { + for (idx = 0; idx < MAX_PEERS; idx++) { if (HDD_WLAN_INVALID_STA_ID == sta_ctx->conn_info.staId[idx]) { hdd_debug("adding peer: %pM, sta_id: %d, 
at idx: %d", peer_mac_addr, sta_id, idx); @@ -3693,7 +3693,7 @@ void hdd_delete_peer(struct hdd_station_ctx *sta_ctx, uint8_t sta_id) { int i; - for (i = 0; i < SIR_MAX_NUM_STA_IN_IBSS; i++) { + for (i = 0; i < MAX_PEERS; i++) { if (sta_id == sta_ctx->conn_info.staId[i]) { sta_ctx->conn_info.staId[i] = HDD_WLAN_INVALID_STA_ID; return; @@ -3706,7 +3706,7 @@ bool hdd_any_valid_peer_present(struct hdd_adapter *adapter) struct hdd_station_ctx *sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter); int idx; - for (idx = 0; idx < SIR_MAX_NUM_STA_IN_IBSS; idx++) + for (idx = 0; idx < MAX_PEERS; idx++) if (HDD_WLAN_INVALID_STA_ID != sta_ctx->conn_info.staId[idx]) return true; diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c index 8280839664b3..ace993decba6 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c @@ -7757,6 +7757,28 @@ static int hdd_config_disconnect_ies(struct hdd_adapter *adapter, return qdf_status_to_os_return(status); } +#if defined(CLD_PM_QOS) && defined(WLAN_FEATURE_LL_MODE) +void wlan_hdd_set_wlm_mode(struct hdd_context *hdd_ctx, uint16_t latency_level) +{ + if (latency_level == + QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL_ULTRALOW) { + hdd_ctx->llm_enabled = true; + if (!hdd_ctx->hbw_requested) { + pm_qos_update_request(&hdd_ctx->pm_qos_req, + DISABLE_KRAIT_IDLE_PS_VAL); + hdd_ctx->hbw_requested = true; + } + } else { + if (hdd_ctx->hbw_requested) { + pm_qos_update_request(&hdd_ctx->pm_qos_req, + PM_QOS_DEFAULT_VALUE); + hdd_ctx->hbw_requested = false; + } + hdd_ctx->llm_enabled = false; + } +} +#endif + /** * __wlan_hdd_cfg80211_wifi_configuration_set() - Wifi configuration * vendor command @@ -8314,6 +8336,8 @@ __wlan_hdd_cfg80211_wifi_configuration_set(struct wiphy *wiphy, return -EINVAL; } + wlan_hdd_set_wlm_mode(hdd_ctx, latency_level); + /* Mapping the latency value to the level which 
fw expected * 0 - normal, 1 - moderate, 2 - low, 3 - ultralow */ @@ -12844,6 +12868,8 @@ static void hdd_sar_safety_timer_cb(void *user_data) void wlan_hdd_sar_unsolicited_timer_start(struct hdd_context *hdd_ctx) { + QDF_STATUS status; + if (!hdd_ctx->config->enable_sar_safety) return; @@ -12853,14 +12879,20 @@ void wlan_hdd_sar_unsolicited_timer_start(struct hdd_context *hdd_ctx) if (QDF_TIMER_STATE_RUNNING != qdf_mc_timer_get_current_state( - &hdd_ctx->sar_safety_unsolicited_timer)) - qdf_mc_timer_start( + &hdd_ctx->sar_safety_unsolicited_timer)) { + status = qdf_mc_timer_start( &hdd_ctx->sar_safety_unsolicited_timer, hdd_ctx->config->sar_safety_unsolicited_timeout); + + if (QDF_IS_STATUS_SUCCESS(status)) + hdd_nofl_debug("sar unsolicited timer started"); + } } void wlan_hdd_sar_timers_reset(struct hdd_context *hdd_ctx) { + QDF_STATUS status; + if (!hdd_ctx->config->enable_sar_safety) return; @@ -12868,16 +12900,26 @@ void wlan_hdd_sar_timers_reset(struct hdd_context *hdd_ctx) return; if (QDF_TIMER_STATE_RUNNING == - qdf_mc_timer_get_current_state(&hdd_ctx->sar_safety_timer)) - qdf_mc_timer_stop(&hdd_ctx->sar_safety_timer); + qdf_mc_timer_get_current_state(&hdd_ctx->sar_safety_timer)) { + status = qdf_mc_timer_stop(&hdd_ctx->sar_safety_timer); + if (QDF_IS_STATUS_SUCCESS(status)) + hdd_nofl_debug("sar safety timer stopped"); + } - qdf_mc_timer_start(&hdd_ctx->sar_safety_timer, - hdd_ctx->config->sar_safety_timeout); + status = qdf_mc_timer_start( + &hdd_ctx->sar_safety_timer, + hdd_ctx->config->sar_safety_timeout); + if (QDF_IS_STATUS_SUCCESS(status)) + hdd_nofl_debug("sar safety timer started"); if (QDF_TIMER_STATE_RUNNING == qdf_mc_timer_get_current_state( - &hdd_ctx->sar_safety_unsolicited_timer)) - qdf_mc_timer_stop(&hdd_ctx->sar_safety_unsolicited_timer); + &hdd_ctx->sar_safety_unsolicited_timer)) { + status = qdf_mc_timer_stop( + &hdd_ctx->sar_safety_unsolicited_timer); + if (QDF_IS_STATUS_SUCCESS(status)) + hdd_nofl_debug("sar unsolicited timer stopped"); 
+ } qdf_event_set(&hdd_ctx->sar_safety_req_resp_event); } @@ -12887,6 +12929,8 @@ void wlan_hdd_sar_timers_init(struct hdd_context *hdd_ctx) if (!hdd_ctx->config->enable_sar_safety) return; + hdd_enter(); + qdf_mc_timer_init(&hdd_ctx->sar_safety_timer, QDF_TIMER_TYPE_SW, hdd_sar_safety_timer_cb, hdd_ctx); @@ -12897,6 +12941,7 @@ void wlan_hdd_sar_timers_init(struct hdd_context *hdd_ctx) qdf_atomic_init(&hdd_ctx->sar_safety_req_resp_event_in_progress); qdf_event_create(&hdd_ctx->sar_safety_req_resp_event); + hdd_exit(); } void wlan_hdd_sar_timers_deinit(struct hdd_context *hdd_ctx) @@ -12904,6 +12949,8 @@ void wlan_hdd_sar_timers_deinit(struct hdd_context *hdd_ctx) if (!hdd_ctx->config->enable_sar_safety) return; + hdd_enter(); + if (QDF_TIMER_STATE_RUNNING == qdf_mc_timer_get_current_state(&hdd_ctx->sar_safety_timer)) qdf_mc_timer_stop(&hdd_ctx->sar_safety_timer); @@ -12918,6 +12965,8 @@ void wlan_hdd_sar_timers_deinit(struct hdd_context *hdd_ctx) qdf_mc_timer_destroy(&hdd_ctx->sar_safety_unsolicited_timer); qdf_event_destroy(&hdd_ctx->sar_safety_req_resp_event); + + hdd_exit(); } #endif @@ -21815,6 +21864,12 @@ wlan_hdd_get_cfg80211_disconnect_reason(struct hdd_adapter *adapter, if (reason >= eSIR_MAC_REASON_PROP_START) { adapter->last_disconnect_reason = wlan_hdd_sir_mac_to_qca_reason(reason); + /* + * Applications expect reason code as 0 for beacon miss failure + * due to backward compatibility. So send ieee80211_reason as 0. 
+ */ + if (reason == eSIR_MAC_BEACON_MISSED) + ieee80211_reason = 0; } else { ieee80211_reason = (enum ieee80211_reasoncode)reason; adapter->last_disconnect_reason = diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h index 787629f082c8..d16a0eef0ce5 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h @@ -793,4 +793,20 @@ static inline void hdd_configure_sar_resume_index(struct hdd_context *hdd_ctx) #endif +/** + * wlan_hdd_set_wlm_mode() - Function to set pm_qos config in wlm mode + * @hdd_ctx: HDD context + * @latency level: latency value received + * + * Return: None + */ +#if defined(CLD_PM_QOS) && defined(WLAN_FEATURE_LL_MODE) +void wlan_hdd_set_wlm_mode(struct hdd_context *hdd_ctx, uint16_t latency_level); +#else +static inline +void wlan_hdd_set_wlm_mode(struct hdd_context *hdd_ctx, uint16_t latency_level) +{ +} +#endif + #endif diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c index a400222d52e5..634256ed13c8 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c @@ -49,8 +49,6 @@ #define WLAN_MODULE_NAME "wlan" #endif -#define DISABLE_KRAIT_IDLE_PS_VAL 1 - #define SSR_MAX_FAIL_CNT 3 static uint8_t re_init_fail_cnt, probe_fail_cnt; diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c index df48da9809a2..cf25f81c8396 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c @@ -8563,6 +8563,97 @@ static void hdd_clear_rps_cpu_mask(struct hdd_context *hdd_ctx) hdd_send_rps_disable_ind(adapter); } +#ifdef CLD_PM_QOS +#define PLD_REMOVE_PM_QOS(x) +#define PLD_REQUEST_PM_QOS(x, 
y) +/** + * hdd_pm_qos_update_cpu_mask() - Prepare CPU mask for PM_qos voting + * @mask: return variable of cpumask for the TPUT + * @high_throughput: only update high cores mask for high TPUT + * + * Return: none + */ +static inline void hdd_pm_qos_update_cpu_mask(cpumask_t *mask, + bool high_throughput) +{ + cpumask_set_cpu(0, mask); + cpumask_set_cpu(1, mask); + cpumask_set_cpu(2, mask); + cpumask_set_cpu(3, mask); + + if (high_throughput) { + /* For high TPUT include GOLD mask also */ + cpumask_set_cpu(4, mask); + cpumask_set_cpu(5, mask); + cpumask_set_cpu(6, mask); + } +} + +/** + * hdd_pm_qos_update_request() - API to request for pm_qos + * @hdd_ctx: handle to hdd context + * @pm_qos_cpu_mask: cpu_mask to apply + * + * Return: none + */ +static inline void hdd_pm_qos_update_request(struct hdd_context *hdd_ctx, + cpumask_t *pm_qos_cpu_mask) +{ + cpumask_copy(&hdd_ctx->pm_qos_req.cpus_affine, pm_qos_cpu_mask); + /* Latency value to be read from INI */ + pm_qos_update_request(&hdd_ctx->pm_qos_req, 1); +} + +#ifdef CONFIG_SMP +/** + * hdd_update_pm_qos_affine_cores() - Update PM_qos request for AFFINE_CORES + * @hdd_ctx: handle to hdd context + * + * Return: none + */ +static inline void hdd_update_pm_qos_affine_cores(struct hdd_context *hdd_ctx) +{ + hdd_ctx->pm_qos_req.type = PM_QOS_REQ_AFFINE_CORES; +} +#else +static inline void hdd_update_pm_qos_affine_cores(struct hdd_context *hdd_ctx) +{ +} +#endif +static inline void hdd_pm_qos_add_request(struct hdd_context *hdd_ctx) +{ + hdd_update_pm_qos_affine_cores(hdd_ctx); + pm_qos_add_request(&hdd_ctx->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); +} + +static inline void hdd_pm_qos_remove_request(struct hdd_context *hdd_ctx) +{ + pm_qos_remove_request(&hdd_ctx->pm_qos_req); +} +#else +#define PLD_REMOVE_PM_QOS(x) pld_remove_pm_qos(x) +#define PLD_REQUEST_PM_QOS(x, y) pld_request_pm_qos(x, y) + +static inline void hdd_pm_qos_add_request(struct hdd_context *hdd_ctx) +{ +} + +static inline void 
hdd_pm_qos_remove_request(struct hdd_context *hdd_ctx) +{ +} + +static inline void hdd_pm_qos_update_cpu_mask(cpumask_t *mask, + bool high_throughput) +{ +} + +static inline void hdd_pm_qos_update_request(struct hdd_context *hdd_ctx, + cpumask_t *pm_qos_cpu_mask) +{ +} +#endif + /** * hdd_pld_request_bus_bandwidth() - Function to control bus bandwidth * @hdd_ctx - handle to hdd context @@ -8593,6 +8684,10 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx, bool tx_level_change = false; bool rxthread_high_tput_req = false; bool dptrace_high_tput_req; + cpumask_t pm_qos_cpu_mask; + bool enable_pm_qos_high = false; + + cpumask_clear(&pm_qos_cpu_mask); if (total_pkts > hdd_ctx->config->busBandwidthHighThreshold) next_vote_level = PLD_BUS_WIDTH_HIGH; else if (total_pkts > hdd_ctx->config->busBandwidthMediumThreshold) @@ -8613,15 +8708,15 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx, pld_request_bus_bandwidth(hdd_ctx->parent_dev, next_vote_level); if ((next_vote_level == PLD_BUS_WIDTH_LOW) || (next_vote_level == PLD_BUS_WIDTH_NONE)) { - if (hdd_ctx->hbw_requested) { - pld_remove_pm_qos(hdd_ctx->parent_dev); + if (hdd_ctx->hbw_requested && !hdd_ctx->llm_enabled) { + PLD_REMOVE_PM_QOS(hdd_ctx->parent_dev); hdd_ctx->hbw_requested = false; } if (hdd_ctx->dynamic_rps) hdd_clear_rps_cpu_mask(hdd_ctx); } else { if (!hdd_ctx->hbw_requested) { - pld_request_pm_qos(hdd_ctx->parent_dev, 1); + PLD_REQUEST_PM_QOS(hdd_ctx->parent_dev, 1); hdd_ctx->hbw_requested = true; } if (hdd_ctx->dynamic_rps) @@ -8678,10 +8773,15 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx, * 3)For UDP cases */ if (avg_no_rx_offload_pkts > - hdd_ctx->config->busBandwidthHighThreshold) + hdd_ctx->config->busBandwidthHighThreshold) { rxthread_high_tput_req = true; - else + enable_pm_qos_high = true; + } else { rxthread_high_tput_req = false; + enable_pm_qos_high = false; + } + + hdd_pm_qos_update_cpu_mask(&pm_qos_cpu_mask, 
enable_pm_qos_high); if (cds_sched_handle_throughput_req(rxthread_high_tput_req)) hdd_warn("Rx thread high_tput(%d) affinity request failed", @@ -8723,6 +8823,14 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx, /* fine-tuning parameters for TX Flows */ temp_tx = (tx_packets + hdd_ctx->prev_tx) / 2; hdd_ctx->prev_tx = tx_packets; + + if (temp_tx > hdd_ctx->config->busBandwidthHighThreshold) + enable_pm_qos_high = true; + else + enable_pm_qos_high = false; + + hdd_pm_qos_update_cpu_mask(&pm_qos_cpu_mask, enable_pm_qos_high); + if (temp_tx > hdd_ctx->config->tcp_tx_high_tput_thres) next_tx_level = WLAN_SVC_TP_HIGH; else @@ -8753,6 +8861,12 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx, hdd_ctx->hdd_txrx_hist_idx &= NUM_TX_RX_HISTOGRAM_MASK; } + /* Clear all the mask if no silver/gold vote is required */ + if (next_vote_level < PLD_BUS_WIDTH_MEDIUM) + cpumask_clear(&pm_qos_cpu_mask); + + hdd_pm_qos_update_request(hdd_ctx, &pm_qos_cpu_mask); + hdd_display_periodic_stats(hdd_ctx, (total_pkts > 0) ? 
true : false); hdd_periodic_sta_stats_display(hdd_ctx); @@ -8918,6 +9032,9 @@ int hdd_bus_bandwidth_init(struct hdd_context *hdd_ctx) hdd_enter(); qdf_spinlock_create(&hdd_ctx->bus_bw_lock); + + hdd_pm_qos_add_request(hdd_ctx); + INIT_WORK(&hdd_ctx->bus_bw_work, hdd_bus_bw_work_handler); hdd_ctx->bus_bw_timer_running = false; qdf_spinlock_create(&hdd_ctx->bus_bw_timer_lock); @@ -8938,6 +9055,7 @@ void hdd_bus_bandwidth_deinit(struct hdd_context *hdd_ctx) qdf_timer_free(&hdd_ctx->bus_bw_timer); qdf_spinlock_destroy(&hdd_ctx->bus_bw_timer_lock); qdf_spinlock_destroy(&hdd_ctx->bus_bw_lock); + hdd_pm_qos_remove_request(hdd_ctx); hdd_exit(); } @@ -10613,6 +10731,7 @@ static int hdd_open_interfaces(struct hdd_context *hdd_ctx, bool rtnl_held) struct hdd_adapter *adapter; enum QDF_GLOBAL_MODE curr_mode; int ret; + bool nan_iface_support; curr_mode = hdd_get_conparam(); /* open monitor mode adapter if con_mode is monitor mode */ @@ -10656,8 +10775,13 @@ static int hdd_open_interfaces(struct hdd_context *hdd_ctx, bool rtnl_held) if (ret) goto err_close_adapters; - if (hdd_ctx->nan_seperate_vdev_supported && - wlan_hdd_nan_separate_iface_supported(hdd_ctx)) { + nan_iface_support = wlan_hdd_nan_separate_iface_supported(hdd_ctx); + if (!hdd_ctx->nan_seperate_vdev_supported || !nan_iface_support) + hdd_debug("NAN separate vdev%s supported by host,%s supported by firmware", + nan_iface_support ? "" : " not", + hdd_ctx->nan_seperate_vdev_supported ? 
"" : " not"); + + if (hdd_ctx->nan_seperate_vdev_supported && nan_iface_support) { adapter = hdd_open_adapter(hdd_ctx, QDF_NAN_DISC_MODE, "wifi-aware%d", wlan_hdd_get_intf_addr(hdd_ctx, QDF_NAN_DISC_MODE), diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_stats.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_stats.c index c65a3c60b3f2..b3b05c8d8ebc 100644 --- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_stats.c +++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_stats.c @@ -997,13 +997,16 @@ static void hdd_link_layer_process_radio_stats(struct hdd_adapter *adapter, tpSirWifiRadioStat pData, u32 num_radio) { - int status, i, nr, ret; + int i, nr, ret; tSirWifiRadioStat *pWifiRadioStat = pData; - struct hdd_context *hdd_ctx = WLAN_HDD_GET_CTX(adapter); - status = wlan_hdd_validate_context(hdd_ctx); - if (0 != status) - return; + /* + * There is no need for wlan_hdd_validate_context here. This is a NB + * operation that will come with DSC synchronization. This ensures that + * no driver transition will take place as long as this operation is + * not complete. Thus the need to check validity of hdd_context is not + * required. 
+ */ hdd_debug("LL_STATS_RADIO: number of radios: %u", num_radio); @@ -1210,6 +1213,8 @@ static void hdd_debugfs_process_ll_stats(struct hdd_adapter *adapter, if (!results->num_peers) priv->request_bitmap &= ~(WMI_LINK_STATS_ALL_PEER); + + priv->request_bitmap &= ~(WMI_LINK_STATS_IFACE); } else if (results->paramId & WMI_LINK_STATS_ALL_PEER) { hdd_debugfs_process_peer_stats(adapter, results->results); if (!results->moreResultToFollow) @@ -1512,6 +1517,48 @@ static void wlan_hdd_handle_ll_stats(struct hdd_adapter *adapter, } } +static void wlan_hdd_dealloc_ll_stats(void *priv) +{ + struct hdd_ll_stats_priv *ll_stats_priv = priv; + struct hdd_ll_stats *stats = NULL; + QDF_STATUS status; + qdf_list_node_t *ll_node; + + if (!ll_stats_priv) + return; + + qdf_spin_lock(&ll_stats_priv->ll_stats_lock); + status = qdf_list_remove_front(&ll_stats_priv->ll_stats_q, &ll_node); + qdf_spin_unlock(&ll_stats_priv->ll_stats_lock); + while (QDF_IS_STATUS_SUCCESS(status)) { + stats = qdf_container_of(ll_node, struct hdd_ll_stats, + ll_stats_node); + + if (stats->result_param_id == WMI_LINK_STATS_RADIO) { + tpSirWifiRadioStat radio_stat = stats->result; + int i; + int num_radio = stats->stats_nradio_npeer.no_of_radios; + + for (i = 0; i < num_radio; i++) { + if (radio_stat->numChannels) + qdf_mem_free(radio_stat->channels); + if (radio_stat->total_num_tx_power_levels) + qdf_mem_free(radio_stat-> + tx_time_per_power_level); + radio_stat++; + } + } + + qdf_mem_free(stats->result); + qdf_mem_free(stats); + qdf_spin_lock(&ll_stats_priv->ll_stats_lock); + status = qdf_list_remove_front(&ll_stats_priv->ll_stats_q, + &ll_node); + qdf_spin_unlock(&ll_stats_priv->ll_stats_lock); + } + qdf_list_destroy(&ll_stats_priv->ll_stats_q); +} + static int wlan_hdd_send_ll_stats_req(struct hdd_adapter *adapter, tSirLLStatsGetReq *req) { @@ -1526,6 +1573,7 @@ static int wlan_hdd_send_ll_stats_req(struct hdd_adapter *adapter, static const struct osif_request_params params = { .priv_size = sizeof(*priv), 
.timeout_ms = WLAN_WAIT_TIME_LL_STATS, + .dealloc = wlan_hdd_dealloc_ll_stats, }; hdd_enter(); @@ -6149,6 +6197,12 @@ int wlan_hdd_get_station_stats(struct hdd_adapter *adapter) tx_nss = wlan_vdev_mlme_get_nss(adapter->vdev); rx_nss = wlan_vdev_mlme_get_nss(adapter->vdev); } + /* Intersection of self and AP's NSS capability */ + if (tx_nss > wlan_vdev_mlme_get_nss(adapter->vdev)) + tx_nss = wlan_vdev_mlme_get_nss(adapter->vdev); + + if (rx_nss > wlan_vdev_mlme_get_nss(adapter->vdev)) + rx_nss = wlan_vdev_mlme_get_nss(adapter->vdev); /* save class a stats to legacy location */ adapter->hdd_stats.class_a_stat.tx_nss = tx_nss; @@ -6440,7 +6494,6 @@ static void hdd_lost_link_cp_stats_info_cb(void *stats_ev) struct hdd_adapter *adapter; struct stats_event *ev = stats_ev; uint8_t i; - struct hdd_station_ctx *sta_ctx; if (wlan_hdd_validate_context(hdd_ctx)) return; @@ -6453,16 +6506,11 @@ static void hdd_lost_link_cp_stats_info_cb(void *stats_ev) hdd_debug("invalid adapter"); continue; } - sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter); - if ((sta_ctx) && - (eConnectionState_Associated != - sta_ctx->conn_info.connState)) { - adapter->rssi_on_disconnect = + adapter->rssi_on_disconnect = ev->vdev_summary_stats[i].stats.rssi; - hdd_debug("rssi on disconnect %d for " QDF_MAC_ADDR_STR, - adapter->rssi_on_disconnect, - QDF_MAC_ADDR_ARRAY(adapter->mac_addr.bytes)); - } + hdd_debug("rssi %d for " QDF_MAC_ADDR_STR, + adapter->rssi_on_disconnect, + QDF_MAC_ADDR_ARRAY(adapter->mac_addr.bytes)); } } diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h b/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h index c561273c5d48..db0a33d5c949 100644 --- a/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h +++ b/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h @@ -751,7 +751,8 @@ struct mgmt_frm_reg_info { }; typedef struct sRrmContext { - tRrmSMEContext rrmSmeContext; + struct rrm_config_param rrmConfig; + tRrmSMEContext rrmSmeContext[MAX_MEASUREMENT_REQUEST]; 
tRrmPEContext rrmPEContext; } tRrmContext, *tpRrmContext; @@ -848,6 +849,7 @@ typedef struct sAniSirGlobal { uint8_t beacon_offload; bool pmf_offload; bool is_fils_roaming_supported; + bool stop_all_host_scan_support; bool enable5gEBT; uint8_t f_prefer_non_dfs_on_radar; uint32_t fEnableDebugLog; diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h b/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h index 19d9eb0439b5..f0dff5dc9dde 100644 --- a/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h +++ b/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h @@ -32,9 +32,9 @@ #define QWLAN_VERSION_MAJOR 5 #define QWLAN_VERSION_MINOR 2 #define QWLAN_VERSION_PATCH 03 -#define QWLAN_VERSION_EXTRA "T" -#define QWLAN_VERSION_BUILD 26 +#define QWLAN_VERSION_EXTRA "K" +#define QWLAN_VERSION_BUILD 27 -#define QWLAN_VERSIONSTR "5.2.03.26T" +#define QWLAN_VERSIONSTR "5.2.03.27K" #endif /* QWLAN_VERSION_H */ diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h b/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h index d222721de8f1..3191ff451746 100644 --- a/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h +++ b/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h @@ -171,6 +171,7 @@ struct mlme_roam_debug_info { #define AKM_FT_FILS 2 #define AKM_SAE 3 #define AKM_OWE 4 +#define AKM_SUITEB 5 /** * enum sir_roam_op_code - Operation to be done by the callback. 
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfg_api.c b/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfg_api.c index 05c705c18f71..f497560db101 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfg_api.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfg_api.c @@ -789,7 +789,7 @@ QDF_STATUS cfg_get_capability_info(tpAniSirGlobal pMac, uint16_t *pCap, if (val) pCapInfo->apsd = 1; - pCapInfo->rrm = pMac->rrm.rrmSmeContext.rrmConfig.rrm_enabled; + pCapInfo->rrm = pMac->rrm.rrmConfig.rrm_enabled; pe_debug("RRM: %d", pCapInfo->rrm); /* DSSS-OFDM */ /* FIXME : no config defined yet. */ diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/rrm_api.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/rrm_api.h index 1cb926782cd1..f154666d42fd 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/rrm_api.h +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/rrm_api.h @@ -41,7 +41,15 @@ uint8_t rrm_get_min_of_max_tx_power(tpAniSirGlobal pMac, int8_t regMax, QDF_STATUS rrm_initialize(tpAniSirGlobal pMac); -QDF_STATUS rrm_cleanup(tpAniSirGlobal pMac); +/** + * rrm_cleanup - cleanup RRM measurement related data for the measurement + * index + * @mac: Pointer to mac context + * @idx: Measurement index + * + * Return: None + */ +void rrm_cleanup(tpAniSirGlobal mac, uint8_t idx); QDF_STATUS rrm_process_link_measurement_request(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo, diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/rrm_global.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/rrm_global.h index 8ba4373fd092..057eab251cc6 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/rrm_global.h +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/rrm_global.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2012, 2014-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2012, 2014-2018, 2020 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -27,6 +27,9 @@ ========================================================================*/ +#define MAX_MEASUREMENT_REQUEST 2 +#define DEFAULT_RRM_IDX 0 + typedef enum eRrmRetStatus { eRRM_SUCCESS, eRRM_INCAPABLE, @@ -48,6 +51,7 @@ typedef struct sSirChannelInfo { typedef struct sSirBeaconReportReqInd { uint16_t messageType; /* eWNI_SME_BEACON_REPORT_REQ_IND */ uint16_t length; + uint8_t measurement_idx; tSirMacAddr bssId; uint16_t measurementDuration[SIR_ESE_MAX_MEAS_IE_REQS]; /* ms */ uint16_t randomizationInterval; /* ms */ @@ -65,6 +69,7 @@ typedef struct sSirBeaconReportReqInd { typedef struct sSirBeaconReportXmitInd { uint16_t messageType; /* eWNI_SME_BEACON_REPORT_RESP_XMIT_IND */ uint16_t length; + uint8_t measurement_idx; tSirMacAddr bssId; uint16_t uDialogToken; uint8_t fMeasureDone; @@ -130,6 +135,7 @@ typedef struct sSirNeighborReportInd { uint16_t messageType; /* eWNI_SME_NEIGHBOR_REPORT_IND */ uint16_t length; uint8_t sessionId; + uint8_t measurement_idx; uint16_t numNeighborReports; tSirMacAddr bssId; /* For the session. */ tSirNeighborBssDescription sNeighborBssDescription[1]; @@ -147,6 +153,7 @@ typedef struct sRRMBeaconReportRequestedIes { #define BEACON_REPORTING_DETAIL_ALL_FF_IE 2 typedef struct sRRMReq { + uint8_t measurement_idx; /* Index of the measurement report in frame */ uint8_t dialog_token; /* In action frame; */ uint8_t token; /* Within individual request; */ uint8_t type; @@ -210,7 +217,7 @@ typedef struct sRrmPEContext { /* Dialog token for the request initiated from station. 
*/ uint8_t DialogToken; uint16_t prev_rrm_report_seq_num; - tpRRMReq pCurrentReq; + tpRRMReq pCurrentReq[MAX_MEASUREMENT_REQUEST]; } tRrmPEContext, *tpRrmPEContext; /* 2008 11k spec reference: 18.4.8.5 RCPI Measurement */ diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_api.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_api.c index 570ef1aa40d3..e893d982614a 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_api.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_api.c @@ -634,7 +634,8 @@ void lim_cleanup(tpAniSirGlobal pMac) /* Now, finally reset the deferred message queue pointers */ lim_reset_deferred_msg_q(pMac); - rrm_cleanup(pMac); + for (i = 0; i < MAX_MEASUREMENT_REQUEST; i++) + rrm_cleanup(pMac, i); lim_ft_cleanup_all_ft_sessions(pMac); diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_action_frame.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_action_frame.c index 23f7bc291e19..c88ffb56227e 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_action_frame.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_action_frame.c @@ -1310,7 +1310,8 @@ __lim_process_radio_measure_request(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo, HIGH_SEQ_NUM_OFFSET) | pHdr->seqControl.seqNumLo); if (curr_seq_num == pMac->rrm.rrmPEContext.prev_rrm_report_seq_num && - pMac->rrm.rrmPEContext.pCurrentReq) { + (pMac->rrm.rrmPEContext.pCurrentReq[DEFAULT_RRM_IDX] || + pMac->rrm.rrmPEContext.pCurrentReq[DEFAULT_RRM_IDX + 1])) { pe_err("rrm report req frame, seq num: %d is already in progress, drop it", curr_seq_num); return; diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c index dc591d0b68e5..c24bb529de42 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c @@ -406,6 +406,7 @@ 
rrm_process_neighbor_report_response(tpAniSirGlobal pMac, pSmeNeighborRpt->messageType = eWNI_SME_NEIGHBOR_REPORT_IND; pSmeNeighborRpt->length = length; + pSmeNeighborRpt->measurement_idx = DEFAULT_RRM_IDX; pSmeNeighborRpt->sessionId = pSessionEntry->smeSessionId; pSmeNeighborRpt->numNeighborReports = pNeighborRep->num_NeighborReport; qdf_mem_copy(pSmeNeighborRpt->bssId, pSessionEntry->bssId, @@ -647,6 +648,7 @@ rrm_process_beacon_report_req(tpAniSirGlobal pMac, pSmeBcnReportReq->measurementDuration[0] = SYS_TU_TO_MS(measDuration); pSmeBcnReportReq->fMeasurementtype[0] = pBeaconReq->measurement_request.Beacon.meas_mode; + pSmeBcnReportReq->measurement_idx = pCurrentReq->measurement_idx; qdf_mem_copy(pSmeBcnReportReq->macaddrBssid, pBeaconReq->measurement_request.Beacon.BSSID, sizeof(tSirMacAddr)); @@ -844,7 +846,8 @@ rrm_process_beacon_report_xmit(tpAniSirGlobal mac_ctx, tSirMacRadioMeasureReport *report = NULL; tSirMacBeaconReport *beacon_report; tpSirBssDescription bss_desc; - tpRRMReq curr_req = mac_ctx->rrm.rrmPEContext.pCurrentReq; + tpRRMReq curr_req = mac_ctx->rrm.rrmPEContext. + pCurrentReq[beacon_xmit_ind->measurement_idx]; tpPESession session_entry; uint8_t session_id, counter; uint8_t i, j, offset = 0; @@ -854,8 +857,6 @@ rrm_process_beacon_report_xmit(tpAniSirGlobal mac_ctx, uint8_t frag_id = 0; uint8_t num_frames, num_reports_in_frame; - pe_debug("Received beacon report xmit indication"); - if (NULL == beacon_xmit_ind) { pe_err("Received beacon_xmit_ind is NULL in PE"); return QDF_STATUS_E_FAILURE; @@ -867,6 +868,9 @@ rrm_process_beacon_report_xmit(tpAniSirGlobal mac_ctx, goto end; } + pe_debug("Received beacon report xmit indication on idx:%d", + beacon_xmit_ind->measurement_idx); + if ((beacon_xmit_ind->numBssDesc) || curr_req->sendEmptyBcnRpt) { beacon_xmit_ind->numBssDesc = (beacon_xmit_ind->numBssDesc == RRM_BCN_RPT_NO_BSS_INFO) ? 
RRM_BCN_RPT_MIN_RPT : @@ -1027,7 +1031,7 @@ end: if (beacon_xmit_ind->fMeasureDone) { pe_debug("Measurement done."); - rrm_cleanup(mac_ctx); + rrm_cleanup(mac_ctx, beacon_xmit_ind->measurement_idx); } if (NULL != report) @@ -1036,13 +1040,19 @@ end: return status; } -static void rrm_process_beacon_request_failure(tpAniSirGlobal pMac, - tpPESession pSessionEntry, - tSirMacAddr peer, - tRrmRetStatus status) +static void +rrm_process_beacon_request_failure(tpAniSirGlobal pMac, + tpPESession pSessionEntry, + tSirMacAddr peer, + tRrmRetStatus status, uint8_t index) { tpSirMacRadioMeasureReport pReport = NULL; - tpRRMReq pCurrentReq = pMac->rrm.rrmPEContext.pCurrentReq; + tpRRMReq pCurrentReq = pMac->rrm.rrmPEContext.pCurrentReq[index]; + + if (!pCurrentReq) { + pe_err("Current request is NULL"); + return; + } pReport = qdf_mem_malloc(sizeof(tSirMacRadioMeasureReport)); if (NULL == pReport) { @@ -1082,7 +1092,6 @@ static void rrm_process_beacon_request_failure(tpAniSirGlobal pMac, * @mac_ctx: Global pointer to MAC context * @peer: Macaddress of the peer requesting the radio measurement * @session_entry: session entry - * @curr_req: Pointer to RRM request * @radiomes_report: Pointer to radio measurement report * @rrm_req: Array of Measurement request IEs * @num_report: No.of reports @@ -1095,15 +1104,17 @@ static void rrm_process_beacon_request_failure(tpAniSirGlobal pMac, */ static QDF_STATUS rrm_process_beacon_req(tpAniSirGlobal mac_ctx, tSirMacAddr peer, - tpPESession session_entry, tpRRMReq curr_req, + tpPESession session_entry, tpSirMacRadioMeasureReport *radiomes_report, tDot11fRadioMeasurementRequest *rrm_req, uint8_t *num_report, int index) { tRrmRetStatus rrm_status = eRRM_SUCCESS; tpSirMacRadioMeasureReport report; + tpRRMReq curr_req; - if (curr_req) { + if (index >= MAX_MEASUREMENT_REQUEST || + mac_ctx->rrm.rrmPEContext.pCurrentReq[index]) { if (*radiomes_report == NULL) { /* * Allocate memory to send reports for @@ -1126,24 +1137,32 @@ QDF_STATUS 
rrm_process_beacon_req(tpAniSirGlobal mac_ctx, tSirMacAddr peer, (*num_report)++; return QDF_STATUS_SUCCESS; } else { + curr_req = mac_ctx->rrm.rrmPEContext.pCurrentReq[index]; + if (curr_req) { + qdf_mem_free(curr_req); + mac_ctx->rrm.rrmPEContext.pCurrentReq[index] = NULL; + } + curr_req = qdf_mem_malloc(sizeof(*curr_req)); if (NULL == curr_req) { pe_err("Unable to allocate memory during RRM Req processing"); - qdf_mem_free(*radiomes_report); + qdf_mem_free(*radiomes_report); + mac_ctx->rrm.rrmPEContext.pCurrentReq[index] = NULL; return QDF_STATUS_E_NOMEM; } - pe_debug("Processing Beacon Report request"); + pe_debug("Processing Beacon Report request idx:%d", index); curr_req->dialog_token = rrm_req->DialogToken.token; curr_req->token = rrm_req-> MeasurementRequest[index].measurement_token; curr_req->sendEmptyBcnRpt = true; - mac_ctx->rrm.rrmPEContext.pCurrentReq = curr_req; + curr_req->measurement_idx = index; + mac_ctx->rrm.rrmPEContext.pCurrentReq[index] = curr_req; rrm_status = rrm_process_beacon_report_req(mac_ctx, curr_req, &rrm_req->MeasurementRequest[index], session_entry); if (eRRM_SUCCESS != rrm_status) { rrm_process_beacon_request_failure(mac_ctx, - session_entry, peer, rrm_status); - rrm_cleanup(mac_ctx); + session_entry, peer, rrm_status, index); + rrm_cleanup(mac_ctx, index); } } @@ -1215,7 +1234,6 @@ rrm_process_radio_measurement_request(tpAniSirGlobal mac_ctx, QDF_STATUS status = QDF_STATUS_SUCCESS; tpSirMacRadioMeasureReport report = NULL; uint8_t num_report = 0; - tpRRMReq curr_req = mac_ctx->rrm.rrmPEContext.pCurrentReq; if (!rrm_req->num_MeasurementRequest) { report = qdf_mem_malloc(sizeof(tSirMacRadioMeasureReport)); @@ -1255,9 +1273,10 @@ rrm_process_radio_measurement_request(tpAniSirGlobal mac_ctx, case SIR_MAC_RRM_BEACON_TYPE: /* Process beacon request. 
*/ status = rrm_process_beacon_req(mac_ctx, peer, - session_entry, curr_req, &report, rrm_req, - &num_report, i); - if (QDF_STATUS_SUCCESS != status) + session_entry, &report, + rrm_req, &num_report, + i); + if (QDF_IS_STATUS_ERROR(status)) return status; break; case SIR_MAC_RRM_LCI_TYPE: @@ -1370,7 +1389,9 @@ QDF_STATUS rrm_initialize(tpAniSirGlobal pMac) { tpRRMCaps pRRMCaps = &pMac->rrm.rrmPEContext.rrmEnabledCaps; - pMac->rrm.rrmPEContext.pCurrentReq = NULL; + pMac->rrm.rrmPEContext.pCurrentReq[0] = NULL; + pMac->rrm.rrmPEContext.pCurrentReq[1] = NULL; + pMac->rrm.rrmPEContext.txMgmtPower = 0; pMac->rrm.rrmPEContext.DialogToken = 0; @@ -1393,38 +1414,20 @@ QDF_STATUS rrm_initialize(tpAniSirGlobal pMac) return QDF_STATUS_SUCCESS; } -/* -------------------------------------------------------------------- */ -/** - * rrm_cleanup - * - * FUNCTION: - * cleanup RRM module - * - * LOGIC: - * - * ASSUMPTIONS: - * - * NOTE: - * - * @param mode - * @param rate - * @return None - */ - -QDF_STATUS rrm_cleanup(tpAniSirGlobal pMac) +void rrm_cleanup(tpAniSirGlobal mac, uint8_t idx) { - if (pMac->rrm.rrmPEContext.pCurrentReq) { - if (pMac->rrm.rrmPEContext.pCurrentReq->request.Beacon.reqIes. 
- pElementIds) { - qdf_mem_free(pMac->rrm.rrmPEContext.pCurrentReq-> - request.Beacon.reqIes.pElementIds); - } + tpRRMReq cur_rrm_req = NULL; - qdf_mem_free(pMac->rrm.rrmPEContext.pCurrentReq); - } + cur_rrm_req = mac->rrm.rrmPEContext.pCurrentReq[idx]; + if (!cur_rrm_req) + return; - pMac->rrm.rrmPEContext.pCurrentReq = NULL; - return QDF_STATUS_SUCCESS; + qdf_mem_free(cur_rrm_req->request.Beacon.reqIes.pElementIds); + cur_rrm_req->request.Beacon.reqIes.pElementIds = NULL; + cur_rrm_req->request.Beacon.reqIes.num = 0; + + qdf_mem_free(cur_rrm_req); + mac->rrm.rrmPEContext.pCurrentReq[idx] = NULL; } /** diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/sch/sch_beacon_gen.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/sch/sch_beacon_gen.c index 8765591aae4b..3d60a2fde2cb 100644 --- a/drivers/staging/qcacld-3.0/core/mac/src/pe/sch/sch_beacon_gen.c +++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/sch/sch_beacon_gen.c @@ -433,7 +433,7 @@ sch_set_fixed_beacon_fields(tpAniSirGlobal mac_ctx, tpPESession session) } } } - if (mac_ctx->rrm.rrmSmeContext.rrmConfig.rrm_enabled) + if (mac_ctx->rrm.rrmConfig.rrm_enabled) populate_dot11f_rrm_ie(mac_ctx, &bcn_2->RRMEnabledCap, session); diff --git a/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c b/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c index 23d1208c68da..5557c67ae56a 100644 --- a/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c +++ b/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c @@ -1559,6 +1559,12 @@ QDF_STATUS sap_signal_hdd_event(struct sap_context *sap_ctx, reassoc_complete->ies = (csr_roaminfo->assocReqPtr + ASSOC_REQ_IE_OFFSET); + /* skip current AP address in reassoc frame */ + if (csr_roaminfo->fReassocReq) { + reassoc_complete->ies_len -= QDF_MAC_ADDR_SIZE; + reassoc_complete->ies += QDF_MAC_ADDR_SIZE; + } + if (csr_roaminfo->addIELen) { if (wlan_get_vendor_ie_ptr_from_oui( SIR_MAC_P2P_OUI, SIR_MAC_P2P_OUI_SIZE, diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h 
b/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h index 5822055762a1..fce0b91dd7aa 100644 --- a/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h +++ b/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h @@ -359,6 +359,7 @@ typedef struct tagCsrScanResultFilter { bool realm_check; uint8_t fils_realm[2]; bool force_rsne_override; + qdf_time_t age_threshold; } tCsrScanResultFilter; typedef struct sCsrChnPower_ { @@ -1794,6 +1795,9 @@ typedef QDF_STATUS (*csr_session_close_cb)(uint8_t session_id); #define CSR_IS_FW_FT_FILS_SUPPORTED(fw_akm_bitmap) \ (((fw_akm_bitmap) & (1 << AKM_FT_FILS)) ? true : false) +#define CSR_IS_FW_SUITEB_ROAM_SUPPORTED(fw_akm_bitmap) \ + (((fw_akm_bitmap) & (1 << AKM_SUITEB)) ? true : false) + QDF_STATUS csr_set_channels(tpAniSirGlobal pMac, tCsrConfigParam *pParam); /* enum to string conversion for debug output */ diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/sme_rrm_internal.h b/drivers/staging/qcacld-3.0/core/sme/inc/sme_rrm_internal.h index c590b4d7e4d7..8545ec908866 100644 --- a/drivers/staging/qcacld-3.0/core/sme/inc/sme_rrm_internal.h +++ b/drivers/staging/qcacld-3.0/core/sme/inc/sme_rrm_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2012, 2014-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2012, 2014-2018, 2020 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -64,6 +64,7 @@ typedef struct sRrmSMEContext { uint16_t token; struct qdf_mac_addr sessionBssId; uint8_t regClass; + uint8_t measurement_idx; /* list of all channels to be measured. 
*/ tCsrChannelInfo channelList; uint8_t currentIndex; @@ -74,7 +75,7 @@ typedef struct sRrmSMEContext { uint16_t randnIntvl; uint16_t duration[SIR_ESE_MAX_MEAS_IE_REQS]; uint8_t measMode[SIR_ESE_MAX_MEAS_IE_REQS]; - struct rrm_config_param rrmConfig; + uint32_t scan_id; qdf_mc_timer_t IterMeasTimer; tDblLinkList neighborReportCache; tRrmNeighborRequestControlInfo neighborReqControlInfo; diff --git a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c index 1ff1d6b53f73..2f5354695ab6 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c @@ -913,7 +913,7 @@ QDF_STATUS sme_get_soft_ap_domain(tHalHandle hHal, v_REGDOMAIN_t * Return: None */ void sme_update_fine_time_measurement_capab(tHalHandle hal, uint8_t session_id, - uint32_t val) + uint32_t val) { tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal); QDF_STATUS status; @@ -922,11 +922,11 @@ void sme_update_fine_time_measurement_capab(tHalHandle hal, uint8_t session_id, if (!val) { mac_ctx->rrm.rrmPEContext.rrmEnabledCaps.fine_time_meas_rpt = 0; - ((tpRRMCaps)mac_ctx->rrm.rrmSmeContext. + ((tpRRMCaps)mac_ctx->rrm. rrmConfig.rm_capability)->fine_time_meas_rpt = 0; } else { mac_ctx->rrm.rrmPEContext.rrmEnabledCaps.fine_time_meas_rpt = 1; - ((tpRRMCaps)mac_ctx->rrm.rrmSmeContext. + ((tpRRMCaps)mac_ctx->rrm. 
rrmConfig.rm_capability)->fine_time_meas_rpt = 1; } @@ -1746,7 +1746,7 @@ QDF_STATUS sme_set_ese_beacon_request(tHalHandle hHal, const uint8_t sessionId, tCsrEseBeaconReqParams *pBeaconReq = NULL; uint8_t counter = 0; struct csr_roam_session *pSession = CSR_GET_SESSION(pMac, sessionId); - tpRrmSMEContext pSmeRrmContext = &pMac->rrm.rrmSmeContext; + tpRrmSMEContext pSmeRrmContext = &pMac->rrm.rrmSmeContext[0]; if (pSmeRrmContext->eseBcnReqInProgress == true) { sme_err("A Beacon Report Req is already in progress"); @@ -1776,6 +1776,7 @@ QDF_STATUS sme_set_ese_beacon_request(tHalHandle hHal, const uint8_t sessionId, pSmeBcnReportReq->channelInfo.channelNum = 255; pSmeBcnReportReq->channelList.numChannels = pEseBcnReq->numBcnReqIe; pSmeBcnReportReq->msgSource = eRRM_MSG_SOURCE_ESE_UPLOAD; + pSmeBcnReportReq->measurement_idx = 0; for (counter = 0; counter < pEseBcnReq->numBcnReqIe; counter++) { pBeaconReq = @@ -3947,8 +3948,8 @@ QDF_STATUS sme_get_config_param(tHalHandle hHal, tSmeConfigParams *pParam) return status; } qdf_mem_copy(&pParam->rrmConfig, - &pMac->rrm.rrmSmeContext.rrmConfig, - sizeof(pMac->rrm.rrmSmeContext.rrmConfig)); + &pMac->rrm.rrmConfig, + sizeof(pMac->rrm.rrmConfig)); pParam->snr_monitor_enabled = pMac->snr_monitor_enabled; sme_release_global_lock(&pMac->sme); } @@ -4164,7 +4165,7 @@ QDF_STATUS sme_oem_update_capability(tHalHandle hal, tpAniSirGlobal pmac = PMAC_STRUCT(hal); uint8_t *bytes; - bytes = pmac->rrm.rrmSmeContext.rrmConfig.rm_capability; + bytes = pmac->rrm.rrmConfig.rm_capability; if (cap->ftm_rr) bytes[4] |= RM_CAP_FTM_RANGE_REPORT; @@ -4192,7 +4193,7 @@ QDF_STATUS sme_oem_get_capability(tHalHandle hal, tpAniSirGlobal pmac = PMAC_STRUCT(hal); uint8_t *bytes; - bytes = pmac->rrm.rrmSmeContext.rrmConfig.rm_capability; + bytes = pmac->rrm.rrmConfig.rm_capability; cap->ftm_rr = bytes[4] & RM_CAP_FTM_RANGE_REPORT; cap->lci_capability = bytes[4] & RM_CAP_CIVIC_LOC_MEASUREMENT; @@ -13570,21 +13571,14 @@ QDF_STATUS 
sme_set_rssi_monitoring(tHalHandle hal, static enum band_info sme_get_connected_roaming_vdev_band(void) { enum band_info band = BAND_ALL; - tpAniSirGlobal mac = sme_get_mac_context(); - struct csr_roam_session *session; - uint8_t session_id, channel; + tp_wma_handle wma_handle; + uint8_t channel; - if (!mac) { - sme_debug("MAC Context is NULL"); - return band; - } - session_id = csr_get_roam_enabled_sta_sessionid(mac); - if (session_id != CSR_SESSION_ID_INVALID) { - session = CSR_GET_SESSION(mac, session_id); - channel = session->connectedProfile.operationChannel; - band = csr_get_rf_band(channel); - return band; - } + wma_handle = cds_get_context(QDF_MODULE_ID_WMA); + if (!wma_handle) + sme_err("Invalid wma handle"); + channel = wma_get_vdev_chan_roam_enabled(wma_handle); + band = csr_get_rf_band(channel); return band; } @@ -14098,6 +14092,8 @@ void sme_update_tgt_services(tHalHandle hal, struct wma_tgt_services *cfg) FL("mac_ctx->pmf_offload: %d"), mac_ctx->pmf_offload); mac_ctx->is_fils_roaming_supported = cfg->is_fils_roaming_supported; + mac_ctx->stop_all_host_scan_support = + cfg->stop_all_host_scan_support; mac_ctx->is_11k_offload_supported = cfg->is_11k_offload_supported; mac_ctx->akm_service_bitmap = cfg->akm_service_bitmap; diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c index 7060b61cf55b..9cd174bd0830 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c @@ -240,6 +240,8 @@ enum mgmt_auth_type diag_auth_type_from_csr_type(eCsrAuthType authtype) n = AUTH_WPA_PSK; break; case eCSR_AUTH_TYPE_RSN: + case eCSR_AUTH_TYPE_SUITEB_EAP_SHA256: + case eCSR_AUTH_TYPE_SUITEB_EAP_SHA384: #ifdef WLAN_FEATURE_11W case eCSR_AUTH_TYPE_RSN_8021X_SHA256: #endif @@ -4674,20 +4676,10 @@ QDF_STATUS csr_roam_call_callback(tpAniSirGlobal pMac, uint32_t sessionId, csr_dump_connection_stats(pMac, pSession, 
roam_info, u1, u2); if (NULL != pSession->callback) { - if (roam_info) { + if (roam_info) roam_info->sessionId = (uint8_t) sessionId; - /* - * the reasonCode will be passed to supplicant by - * cfg80211_disconnected. Based on the document, - * the reason code passed to supplicant needs to set - * to 0 if unknown. eSIR_BEACON_MISSED reason code is - * not recognizable so that we set to 0 instead. - */ - if (roam_info->reasonCode == eSIR_MAC_BEACON_MISSED) - roam_info->reasonCode = 0; - } status = pSession->callback(pSession->pContext, roam_info, - roamId, u1, u2); + roamId, u1, u2); } /* * EVENT_WLAN_STATUS_V2: eCSR_ROAM_ASSOCIATION_COMPLETION, @@ -6353,6 +6345,8 @@ static bool csr_roam_select_bss(tpAniSirGlobal mac_ctx, enum policy_mgr_con_mode mode; uint8_t chan_id; QDF_STATUS qdf_status; + eCsrPhyMode self_phymode = mac_ctx->roam.configParam.phyMode; + tDot11fBeaconIEs *bcn_ies; vdev = wlan_objmgr_get_vdev_by_id_from_pdev(mac_ctx->pdev, vdev_id, @@ -6375,6 +6369,29 @@ static bool csr_roam_select_bss(tpAniSirGlobal mac_ctx, * sessions exempted */ result = &scan_result->Result; + bcn_ies = result->pvIes; + /* + * If phymode is configured to DOT11 Only profile. + * Don't connect to profile which is less than them. + */ + if (bcn_ies && ((self_phymode == eCSR_DOT11_MODE_11n_ONLY && + !bcn_ies->HTCaps.present) || + (self_phymode == eCSR_DOT11_MODE_11ac_ONLY && + !bcn_ies->VHTCaps.present) || + (self_phymode == eCSR_DOT11_MODE_11ax_ONLY && + !bcn_ies->he_cap.present))) { + sme_info("self_phymode %d mismatch HT %d VHT %d HE %d", + self_phymode, bcn_ies->HTCaps.present, + bcn_ies->VHTCaps.present, + bcn_ies->he_cap.present); + *roam_state = eCsrStopRoamingDueToConcurrency; + status = true; + *roam_bss_entry = csr_ll_next(&bss_list->List, + *roam_bss_entry, + LL_ACCESS_LOCK); + continue; + } + /* * Ignore the BSS if any other vdev is already connected * to it. 
@@ -7315,6 +7332,8 @@ static QDF_STATUS csr_roam_save_params(tpAniSirGlobal mac_ctx, (eCSR_AUTH_TYPE_FT_RSN_PSK == auth_type) || (eCSR_AUTH_TYPE_FT_SAE == auth_type) || (eCSR_AUTH_TYPE_FT_SUITEB_EAP_SHA384 == auth_type) || + (eCSR_AUTH_TYPE_SUITEB_EAP_SHA256 == auth_type) || + (eCSR_AUTH_TYPE_SUITEB_EAP_SHA384 == auth_type) || #if defined WLAN_FEATURE_11W (eCSR_AUTH_TYPE_RSN_PSK_SHA256 == auth_type) || (eCSR_AUTH_TYPE_RSN_8021X_SHA256 == auth_type) || @@ -7498,6 +7517,8 @@ static QDF_STATUS csr_roam_save_security_rsp_ie(tpAniSirGlobal pMac, (eCSR_AUTH_TYPE_FT_RSN_PSK == authType) || (eCSR_AUTH_TYPE_FT_SAE == authType) || (eCSR_AUTH_TYPE_FT_SUITEB_EAP_SHA384 == authType) + || (eCSR_AUTH_TYPE_SUITEB_EAP_SHA256 == authType) + || (eCSR_AUTH_TYPE_SUITEB_EAP_SHA384 == authType) #ifdef FEATURE_WLAN_WAPI || (eCSR_AUTH_TYPE_WAPI_WAI_PSK == authType) || (eCSR_AUTH_TYPE_WAPI_WAI_CERTIFICATE == authType) @@ -15995,63 +16016,40 @@ void csr_clear_sae_single_pmk(tpAniSirGlobal pMac, uint8_t vdev_id, #endif void csr_roam_del_pmk_cache_entry(struct csr_roam_session *session, - tPmkidCacheInfo *cached_pmksa) + tPmkidCacheInfo *cached_pmksa, u32 del_idx) { u32 curr_idx; - u8 del_pmk[CSR_RSN_MAX_PMK_LEN] = {0}; - u32 i, del_idx; + u32 i; - /* copy the PMK of matched BSSID */ - qdf_mem_copy(del_pmk, cached_pmksa->pmk, cached_pmksa->pmk_len); - - /* Search for matching PMK in session PMK cache */ - for (del_idx = 0; del_idx != session->NumPmkidCache; del_idx++) { - cached_pmksa = &session->PmkidCacheInfo[del_idx]; - if (cached_pmksa->pmk_len && (!qdf_mem_cmp - (cached_pmksa->pmk, del_pmk, cached_pmksa->pmk_len))) { - /* Clear this - matched entry */ - qdf_mem_zero(cached_pmksa, sizeof(tPmkidCacheInfo)); - - /* Match Found, Readjust the other entries */ - curr_idx = session->curr_cache_idx; - if (del_idx < curr_idx) { - for (i = del_idx; i < (curr_idx - 1); i++) { - qdf_mem_copy(&session-> - PmkidCacheInfo[i], - &session-> - PmkidCacheInfo[i + 1], - sizeof(tPmkidCacheInfo)); - } - - 
session->curr_cache_idx--; - qdf_mem_zero(&session->PmkidCacheInfo - [session->curr_cache_idx], - sizeof(tPmkidCacheInfo)); - } else if (del_idx > curr_idx) { - for (i = del_idx; i > (curr_idx); i--) { - qdf_mem_copy(&session-> - PmkidCacheInfo[i], - &session-> - PmkidCacheInfo[i - 1], - sizeof(tPmkidCacheInfo)); - } - - qdf_mem_zero(&session->PmkidCacheInfo - [session->curr_cache_idx], - sizeof(tPmkidCacheInfo)); - } - - /* Decrement the count since an entry is been deleted */ - session->NumPmkidCache--; - sme_debug("PMKID at index=%d deleted, current index=%d cache count=%d", - del_idx, session->curr_cache_idx, - session->NumPmkidCache); - /* As we re-adjusted entries by one position search - * again from current index - */ - del_idx--; + /* Clear this - matched entry */ + qdf_mem_zero(cached_pmksa, sizeof(tPmkidCacheInfo)); + /* Match Found, Readjust the other entries */ + curr_idx = session->curr_cache_idx; + if (del_idx < curr_idx) { + for (i = del_idx; i < (curr_idx - 1); i++) { + qdf_mem_copy(&session->PmkidCacheInfo[i], + &session->PmkidCacheInfo[i + 1], + sizeof(tPmkidCacheInfo)); } + + session->curr_cache_idx--; + qdf_mem_zero(&session->PmkidCacheInfo[session->curr_cache_idx], + sizeof(tPmkidCacheInfo)); + } else if (del_idx > curr_idx) { + for (i = del_idx; i > (curr_idx); i--) { + qdf_mem_copy(&session->PmkidCacheInfo[i], + &session->PmkidCacheInfo[i - 1], + sizeof(tPmkidCacheInfo)); + } + + qdf_mem_zero(&session->PmkidCacheInfo[session->curr_cache_idx], + sizeof(tPmkidCacheInfo)); } + + /* Decrement the count since an entry is been deleted */ + session->NumPmkidCache--; + sme_debug("PMKID at index=%d deleted, current index=%d cache count=%d", + del_idx, session->curr_cache_idx, session->NumPmkidCache); } QDF_STATUS csr_roam_del_pmkid_from_cache(tpAniSirGlobal pMac, @@ -16063,6 +16061,9 @@ QDF_STATUS csr_roam_del_pmkid_from_cache(tpAniSirGlobal pMac, bool fMatchFound = false; uint32_t Index; tPmkidCacheInfo *cached_pmksa; + u32 del_idx; + u8 
del_pmk[CSR_RSN_MAX_PMK_LEN] = {0}; + if (!pSession) { sme_err("session %d not found", sessionId); @@ -16104,8 +16105,35 @@ QDF_STATUS csr_roam_del_pmkid_from_cache(tpAniSirGlobal pMac, fMatchFound = 1; if (fMatchFound) { - /* Delete the matched PMK cache entry */ - csr_roam_del_pmk_cache_entry(pSession, cached_pmksa); + /* copy the PMK of matched BSSID */ + qdf_mem_copy(del_pmk, cached_pmksa->pmk, + cached_pmksa->pmk_len); + + /* Free the matched entry to address null pmk_len case*/ + csr_roam_del_pmk_cache_entry(pSession, cached_pmksa, + Index); + + /* Search for matching PMK in session PMK cache */ + for (del_idx = 0; del_idx != pSession->NumPmkidCache; + del_idx++) { + cached_pmksa = + &pSession->PmkidCacheInfo[del_idx]; + if (cached_pmksa->pmk_len && (!qdf_mem_cmp + (cached_pmksa->pmk, del_pmk, + cached_pmksa->pmk_len))) { + /* Delete the matched PMK cache entry */ + csr_roam_del_pmk_cache_entry( + pSession, cached_pmksa, + del_idx); + /* Search again from current index as we + * re-adjusted entries by one position + */ + del_idx--; + } + } + /* reset stored pmk */ + qdf_mem_zero(del_pmk, CSR_RSN_MAX_PMK_LEN); + break; } } @@ -17527,7 +17555,7 @@ QDF_STATUS csr_send_join_req_msg(tpAniSirGlobal pMac, uint32_t sessionId, /* Fill rrm config parameters */ qdf_mem_copy(&csr_join_req->rrm_config, - &pMac->rrm.rrmSmeContext.rrmConfig, + &pMac->rrm.rrmConfig, sizeof(struct rrm_config_param)); pAP_capabilityInfo = @@ -19882,8 +19910,10 @@ csr_update_roam_scan_offload_request(tpAniSirGlobal mac_ctx, mac_ctx->roam.configParam.roam_trigger_reason_bitmask; req_buf->roaming_scan_policy = mac_ctx->roam.configParam.roaming_scan_policy; + /* Do not force RSSI triggers in case controlled roaming enable */ req_buf->roam_force_rssi_trigger = - mac_ctx->roam.configParam.roam_force_rssi_trigger; + (!neighbor_roam_info->roam_control_enable && + mac_ctx->roam.configParam.roam_force_rssi_trigger); csr_update_roam_req_adaptive_11r(session, req_buf); @@ -21746,6 +21776,13 @@ 
csr_roam_offload_scan(tpAniSirGlobal mac_ctx, uint8_t session_id, return QDF_STATUS_SUCCESS; } + if ((roam_profile_akm == eCSR_AUTH_TYPE_SUITEB_EAP_SHA256 || + roam_profile_akm == eCSR_AUTH_TYPE_SUITEB_EAP_SHA384) && + !CSR_IS_FW_SUITEB_ROAM_SUPPORTED(fw_akm_bitmap)) { + sme_info("Roaming not supported for SUITEB connection"); + return QDF_STATUS_SUCCESS; + } + /* * If fw doesn't advertise FT SAE, FT-FILS or FT-Suite-B capability, * don't support roaming to that profile @@ -23759,6 +23796,8 @@ static QDF_STATUS csr_process_roam_sync_callback(tpAniSirGlobal mac_ctx, tpCsrNeighborRoamControlInfo neigh_roam_info = &mac_ctx->roam.neighborRoamInfo[session_id]; uint32_t chan_id; + bool abort_host_scan_cap = false; + wlan_scan_id scan_id; if (!session) { sme_err("LFR3: Session not found"); @@ -23791,8 +23830,29 @@ static QDF_STATUS csr_process_roam_sync_callback(tpAniSirGlobal mac_ctx, ROAMING_OFFLOAD_TIMER_START); csr_roam_call_callback(mac_ctx, session_id, NULL, 0, eCSR_ROAM_START, eCSR_ROAM_RESULT_SUCCESS); + /* + * For emergency deauth roaming, firmware sends ROAM start + * instead of ROAM scan start notification as data path queues + * will be stopped only during roam start notification. + * This is because, for deauth/disassoc triggered roam, the + * AP has sent deauth, and packets shouldn't be sent to AP + * after that. Since firmware is sending roam start directly + * host sends scan abort during roam scan, but in other + * triggers, the host receives roam start after candidate + * selection and roam scan is complete. So when host sends + * roam abort for emergency deauth roam trigger, the firmware + * roam scan is also aborted. This results in roaming failure. + * So send scan_id as CANCEL_HOST_SCAN_ID to scan module to + * abort only host triggered scans. 
+ */ + abort_host_scan_cap = mac_ctx->stop_all_host_scan_support; + if (abort_host_scan_cap) + scan_id = CANCEL_HOST_SCAN_ID; + else + scan_id = INVAL_SCAN_ID; + wlan_abort_scan(mac_ctx->pdev, INVAL_PDEV_ID, - session_id, INVAL_SCAN_ID, false); + session_id, scan_id, false); return status; case SIR_ROAMING_ABORT: csr_roam_roaming_offload_timer_action(mac_ctx, diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c index 96e467ab69bb..8342a3fe0705 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c @@ -2547,6 +2547,7 @@ static QDF_STATUS csr_prepare_scan_filter(tpAniSirGlobal mac_ctx, filter->ignore_auth_enc_type = true; filter->rrm_measurement_filter = pFilter->fMeasurement; + filter->age_threshold = pFilter->age_threshold; filter->mobility_domain = pFilter->MDID.mobilityDomain; diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_inside_api.h b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_inside_api.h index b1d5890ec515..17ebbbbae19d 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_inside_api.h +++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_inside_api.h @@ -1001,7 +1001,7 @@ QDF_STATUS csr_roam_del_pmkid_from_cache(tpAniSirGlobal pMac, bool flush_cache); void csr_roam_del_pmk_cache_entry(struct csr_roam_session *session, - tPmkidCacheInfo *cached_pmksa); + tPmkidCacheInfo *cached_pmksa, u32 del_idx); #if defined(WLAN_SAE_SINGLE_PMK) && defined(WLAN_FEATURE_ROAM_OFFLOAD) /** diff --git a/drivers/staging/qcacld-3.0/core/sme/src/rrm/sme_rrm.c b/drivers/staging/qcacld-3.0/core/sme/src/rrm/sme_rrm.c index 2501fd443efe..a2886b91265e 100644 --- a/drivers/staging/qcacld-3.0/core/sme/src/rrm/sme_rrm.c +++ b/drivers/staging/qcacld-3.0/core/sme/src/rrm/sme_rrm.c @@ -110,46 +110,46 @@ static void rrm_indicate_neighbor_report_result(tpAniSirGlobal pMac, void *callbackContext; /* Reset the 
neighbor response pending status */ - pMac->rrm.rrmSmeContext.neighborReqControlInfo.isNeighborRspPending = - false; + pMac->rrm.rrmSmeContext[DEFAULT_RRM_IDX]. + neighborReqControlInfo.isNeighborRspPending = false; /* Stop the timer if it is already running. * The timer should be running only in the SUCCESS case. */ if (QDF_TIMER_STATE_RUNNING == - qdf_mc_timer_get_current_state(&pMac->rrm.rrmSmeContext. + qdf_mc_timer_get_current_state(&pMac->rrm. + rrmSmeContext[DEFAULT_RRM_IDX]. neighborReqControlInfo. neighborRspWaitTimer)) { sme_debug("No entry in neighbor report cache"); - qdf_mc_timer_stop(&pMac->rrm.rrmSmeContext. + qdf_mc_timer_stop(&pMac->rrm.rrmSmeContext[DEFAULT_RRM_IDX]. neighborReqControlInfo.neighborRspWaitTimer); } callback = - pMac->rrm.rrmSmeContext.neighborReqControlInfo. + pMac->rrm.rrmSmeContext[DEFAULT_RRM_IDX].neighborReqControlInfo. neighborRspCallbackInfo.neighborRspCallback; callbackContext = - pMac->rrm.rrmSmeContext.neighborReqControlInfo. + pMac->rrm.rrmSmeContext[DEFAULT_RRM_IDX].neighborReqControlInfo. neighborRspCallbackInfo.neighborRspCallbackContext; /* Reset the callback and the callback context before calling the * callback. It is very likely that there may be a registration in * callback itself. */ - pMac->rrm.rrmSmeContext.neighborReqControlInfo.neighborRspCallbackInfo. - neighborRspCallback = NULL; - pMac->rrm.rrmSmeContext.neighborReqControlInfo.neighborRspCallbackInfo. - neighborRspCallbackContext = NULL; + pMac->rrm.rrmSmeContext[DEFAULT_RRM_IDX].neighborReqControlInfo. + neighborRspCallbackInfo.neighborRspCallback = NULL; + pMac->rrm.rrmSmeContext[DEFAULT_RRM_IDX].neighborReqControlInfo. 
+ neighborRspCallbackInfo.neighborRspCallbackContext = NULL; /* Call the callback with the status received from caller */ if (callback) callback(callbackContext, qdf_status); - - } /** * sme_RrmBeaconReportXmitInd () - Send beacon report * @mac_ctx Pointer to mac context + * @measurement_index: Measurement index * @result_arr scan results * @msrmnt_status flag to indicate that the measurement is done. * @bss_count bss count @@ -161,8 +161,8 @@ static void rrm_indicate_neighbor_report_result(tpAniSirGlobal pMac, static QDF_STATUS sme_rrm_send_beacon_report_xmit_ind(tpAniSirGlobal mac_ctx, - tCsrScanResultInfo **result_arr, uint8_t msrmnt_status, - uint8_t bss_count) + uint8_t measurement_index, tCsrScanResultInfo **result_arr, + uint8_t msrmnt_status, uint8_t bss_count) { tpSirBssDescription bss_desc = NULL; tpSirBeaconReportXmitInd beacon_rep; @@ -171,7 +171,8 @@ sme_rrm_send_beacon_report_xmit_ind(tpAniSirGlobal mac_ctx, uint8_t i = 0, j = 0, counter = 0; tCsrScanResultInfo *cur_result = NULL; QDF_STATUS status = QDF_STATUS_E_FAILURE; - tpRrmSMEContext rrm_ctx = &mac_ctx->rrm.rrmSmeContext; + tpRrmSMEContext rrm_ctx = + &mac_ctx->rrm.rrmSmeContext[measurement_index]; tpSirBssDescription bss_desc_to_free[SIR_BCN_REPORT_MAX_BSS_DESC] = {0}; if (NULL == result_arr && !msrmnt_status) { @@ -191,6 +192,7 @@ sme_rrm_send_beacon_report_xmit_ind(tpAniSirGlobal mac_ctx, } beacon_rep->messageType = eWNI_SME_BEACON_REPORT_RESP_XMIT_IND; beacon_rep->length = length; + beacon_rep->measurement_idx = measurement_index; beacon_rep->uDialogToken = rrm_ctx->token; beacon_rep->duration = rrm_ctx->duration[0]; beacon_rep->regClass = rrm_ctx->regClass; @@ -252,6 +254,7 @@ sme_rrm_send_beacon_report_xmit_ind(tpAniSirGlobal mac_ctx, /** * sme_ese_send_beacon_req_scan_results () - Send beacon report * @mac_ctx Pointer to mac context + * @measurement_index: Measurement request index * @session_id - session id * @result_arr scan results * @msrmnt_status flag to indicate that the measurement 
is done. @@ -266,8 +269,9 @@ sme_rrm_send_beacon_report_xmit_ind(tpAniSirGlobal mac_ctx, * Return: status */ static QDF_STATUS sme_ese_send_beacon_req_scan_results( - tpAniSirGlobal mac_ctx, uint32_t session_id, - uint8_t channel, tCsrScanResultInfo **result_arr, + tpAniSirGlobal mac_ctx, uint8_t measurement_index, + uint32_t session_id, uint8_t channel, + tCsrScanResultInfo **result_arr, uint8_t msrmnt_status, uint8_t bss_count) { QDF_STATUS status = QDF_STATUS_E_FAILURE; @@ -277,7 +281,8 @@ static QDF_STATUS sme_ese_send_beacon_req_scan_results( uint32_t out_ie_len = 0; uint8_t bss_counter = 0; tCsrScanResultInfo *cur_result = NULL; - tpRrmSMEContext rrm_ctx = &mac_ctx->rrm.rrmSmeContext; + tpRrmSMEContext rrm_ctx = + &mac_ctx->rrm.rrmSmeContext[measurement_index]; struct csr_roam_info *roam_info; tSirEseBcnReportRsp bcn_rpt_rsp; tpSirEseBcnReportRsp bcn_report = &bcn_rpt_rsp; @@ -417,6 +422,7 @@ void sme_reset_ese_bcn_req_in_progress(tpRrmSMEContext sme_rrm_ctx) /** * sme_rrm_send_scan_result() - to get scan result and send the beacon report * @mac_ctx: pointer to mac context + * @measurement_index: Measurement request number * @num_chan: number of channels * @chan_list: list of channels to fetch the result from * @measurementdone: Flag to indicate measurement done or no @@ -427,6 +433,7 @@ void sme_reset_ese_bcn_req_in_progress(tpRrmSMEContext sme_rrm_ctx) * Return: QDF_STATUS */ static QDF_STATUS sme_rrm_send_scan_result(tpAniSirGlobal mac_ctx, + uint8_t measurement_index, uint8_t num_chan, uint8_t *chan_list, uint8_t measurementdone) @@ -439,7 +446,8 @@ static QDF_STATUS sme_rrm_send_scan_result(tpAniSirGlobal mac_ctx, struct scan_result_list *result_list; QDF_STATUS status; uint8_t num_scan_results, counter = 0; - tpRrmSMEContext rrm_ctx = &mac_ctx->rrm.rrmSmeContext; + tpRrmSMEContext rrm_ctx = + &mac_ctx->rrm.rrmSmeContext[measurement_index]; uint32_t session_id; struct csr_roam_info *roam_info = NULL; tSirScanType scan_type; @@ -470,6 +478,16 @@ static 
QDF_STATUS sme_rrm_send_scan_result(tpAniSirGlobal mac_ctx, filter.ChannelInfo.ChannelList = chan_list; filter.fMeasurement = true; + if (eRRM_MSG_SOURCE_ESE_UPLOAD == rrm_ctx->msgSource || + eRRM_MSG_SOURCE_LEGACY_ESE == rrm_ctx->msgSource) + scan_type = rrm_ctx->measMode[rrm_ctx->currentIndex]; + else + scan_type = rrm_ctx->measMode[0]; + + if (scan_type == eSIR_BEACON_TABLE) + filter.age_threshold = + ucfg_scan_get_aging_time(mac_ctx->psoc); + /* * In case this is beacon report request from last AP (before roaming) * following call to csr_roam_get_session_id_from_bssid will fail, @@ -486,7 +504,8 @@ static QDF_STATUS sme_rrm_send_scan_result(tpAniSirGlobal mac_ctx, if (filter.SSIDs.SSIDList) qdf_mem_free(filter.SSIDs.SSIDList); - sme_debug("RRM Measurement Done %d", measurementdone); + sme_debug("RRM Measurement Done %d for index:%d", measurementdone, + measurement_index); if (NULL == result_handle) { /* * no scan results @@ -506,12 +525,14 @@ static QDF_STATUS sme_rrm_send_scan_result(tpAniSirGlobal mac_ctx, #ifdef FEATURE_WLAN_ESE if (eRRM_MSG_SOURCE_ESE_UPLOAD == rrm_ctx->msgSource) status = sme_ese_send_beacon_req_scan_results(mac_ctx, - session_id, chan_list[0], - NULL, measurementdone, 0); + measurement_index, session_id, + chan_list[0], NULL, + measurementdone, 0); else #endif /* FEATURE_WLAN_ESE */ status = sme_rrm_send_beacon_report_xmit_ind(mac_ctx, - NULL, measurementdone, 0); + measurement_index, NULL, + measurementdone, 0); return status; } scan_results = sme_scan_result_get_first(mac_handle, result_handle); @@ -519,15 +540,14 @@ static QDF_STATUS sme_rrm_send_scan_result(tpAniSirGlobal mac_ctx, #ifdef FEATURE_WLAN_ESE if (eRRM_MSG_SOURCE_ESE_UPLOAD == rrm_ctx->msgSource) { status = sme_ese_send_beacon_req_scan_results(mac_ctx, - session_id, - chan_list[0], - NULL, - measurementdone, - 0); + measurement_index, session_id, + chan_list[0], NULL, + measurementdone, 0); } else #endif /* FEATURE_WLAN_ESE */ status = 
sme_rrm_send_beacon_report_xmit_ind(mac_ctx, - NULL, measurementdone, 0); + measurement_index, NULL, + measurementdone, 0); } result_list = (struct scan_result_list *)result_handle; @@ -563,11 +583,6 @@ static QDF_STATUS sme_rrm_send_scan_result(tpAniSirGlobal mac_ctx, goto rrm_send_scan_results_done; } - if (eRRM_MSG_SOURCE_ESE_UPLOAD == rrm_ctx->msgSource || - eRRM_MSG_SOURCE_LEGACY_ESE == rrm_ctx->msgSource) - scan_type = rrm_ctx->measMode[rrm_ctx->currentIndex]; - else - scan_type = rrm_ctx->measMode[0]; while (scan_results) { /* @@ -618,14 +633,14 @@ static QDF_STATUS sme_rrm_send_scan_result(tpAniSirGlobal mac_ctx, #ifdef FEATURE_WLAN_ESE if (eRRM_MSG_SOURCE_ESE_UPLOAD == rrm_ctx->msgSource) status = sme_ese_send_beacon_req_scan_results(mac_ctx, - session_id, chan_list[0], - scanresults_arr, measurementdone, - counter); + measurement_index, session_id, + chan_list[0], scanresults_arr, + measurementdone, counter); else #endif /* FEATURE_WLAN_ESE */ status = sme_rrm_send_beacon_report_xmit_ind(mac_ctx, - scanresults_arr, measurementdone, - counter); + measurement_index, scanresults_arr, + measurementdone, counter); } rrm_send_scan_results_done: @@ -640,7 +655,8 @@ rrm_send_scan_results_done: /** * sme_rrm_scan_request_callback() -Sends the beacon report xmit to PE - * @halHandle: Pointer to the Hal Handle. + * @mac_ctx: Pointer to mac context + * @pSmeRrmContext: SME rrm context for measurement request * @sessionId: session id * @scanId: Scan ID. * @status: CSR Status. 
@@ -651,18 +667,18 @@ rrm_send_scan_results_done: * * Return : 0 for success, non zero for failure */ -static QDF_STATUS sme_rrm_scan_request_callback(tHalHandle halHandle, +static QDF_STATUS sme_rrm_scan_request_callback(tpAniSirGlobal pMac, + tpRrmSMEContext pSmeRrmContext, uint8_t sessionId, uint32_t scanId, eCsrScanStatus status) { uint16_t interval; - tpAniSirGlobal pMac = (tpAniSirGlobal) halHandle; - tpRrmSMEContext pSmeRrmContext = &pMac->rrm.rrmSmeContext; uint32_t time_tick; QDF_STATUS qdf_status; uint32_t session_id; bool valid_result = true; + uint8_t ch_idx, num_chan; /* * RRM scan response received after roaming to different AP. @@ -676,16 +692,46 @@ static QDF_STATUS sme_rrm_scan_request_callback(tHalHandle halHandle, valid_result = false; } + if (pSmeRrmContext->channelList.ChannelList) { + sme_err("[802.11 RRM]: Global freq list is null"); + pSmeRrmContext->channelList.numOfChannels = 0; + sme_reset_ese_bcn_req_in_progress(pSmeRrmContext); + return QDF_STATUS_E_FAILURE; + } + /* if any more channels are pending, start a timer of a random value * within randomization interval. */ - if (((pSmeRrmContext->currentIndex + 1) < - pSmeRrmContext->channelList.numOfChannels) && valid_result) { - sme_rrm_send_scan_result(pMac, 1, - &pSmeRrmContext->channelList. - ChannelList[pSmeRrmContext - ->currentIndex], - false); + ch_idx = pSmeRrmContext->currentIndex; + num_chan = pSmeRrmContext->channelList.numOfChannels; + if (((ch_idx + 1) < num_chan) && valid_result) { + if (QDF_TIMER_STATE_RUNNING == + qdf_mc_timer_get_current_state( + &pSmeRrmContext->IterMeasTimer)) { + /* + * Measurement random timer is already running, this + * should not happen because the driver doesn't support + * multiple measurements simultaneously. 
Also for + * multiple measurements on a single report, the + * channels in op class should be appended to the global + * channel list + */ + sme_err("[802.11 RRM]: meas timer is already running"); + sme_rrm_send_scan_result( + pMac, pSmeRrmContext->measurement_idx, 1, + &pSmeRrmContext->channelList. + ChannelList[ch_idx], true); + qdf_mem_free(pSmeRrmContext->channelList.ChannelList); + pSmeRrmContext->channelList.ChannelList = NULL; + pSmeRrmContext->channelList.numOfChannels = 0; + sme_reset_ese_bcn_req_in_progress(pSmeRrmContext); + return QDF_STATUS_E_FAILURE; + } + + sme_rrm_send_scan_result(pMac, pSmeRrmContext->measurement_idx, + 1, &pSmeRrmContext->channelList. + ChannelList[ch_idx], false); + /* Advance the current index. */ pSmeRrmContext->currentIndex++; /* start the timer to issue next request. */ @@ -702,6 +748,7 @@ static QDF_STATUS sme_rrm_scan_request_callback(tHalHandle halHandle, if (QDF_IS_STATUS_ERROR(qdf_status)) { qdf_mem_free(pSmeRrmContext->channelList.ChannelList); pSmeRrmContext->channelList.ChannelList = NULL; + pSmeRrmContext->channelList.numOfChannels = 0; sme_reset_ese_bcn_req_in_progress(pSmeRrmContext); } @@ -709,13 +756,12 @@ static QDF_STATUS sme_rrm_scan_request_callback(tHalHandle halHandle, /* Done with the measurement. Clean up all context and send a * message to PE with measurement done flag set. */ - sme_rrm_send_scan_result(pMac, 1, - &pSmeRrmContext->channelList. - ChannelList[pSmeRrmContext - ->currentIndex], - true); + sme_rrm_send_scan_result(pMac, pSmeRrmContext->measurement_idx, + 1, &pSmeRrmContext->channelList. 
+ ChannelList[ch_idx], true); qdf_mem_free(pSmeRrmContext->channelList.ChannelList); pSmeRrmContext->channelList.ChannelList = NULL; + pSmeRrmContext->channelList.numOfChannels = 0; sme_reset_ese_bcn_req_in_progress(pSmeRrmContext); } @@ -725,17 +771,18 @@ static QDF_STATUS sme_rrm_scan_request_callback(tHalHandle halHandle, static void sme_rrm_scan_event_callback(struct wlan_objmgr_vdev *vdev, struct scan_event *event, void *arg) { + tpAniSirGlobal mac_ctx; + tpRrmSMEContext smerrmctx; uint32_t scan_id; - uint8_t session_id; + uint8_t session_id, i; eCsrScanStatus scan_status = eCSR_SCAN_FAILURE; - tHalHandle hal_handle; bool success = false; session_id = wlan_vdev_get_id(vdev); scan_id = event->scan_id; - hal_handle = cds_get_context(QDF_MODULE_ID_SME); - if (!hal_handle) { - QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_FATAL, - FL("invalid h_hal")); + + mac_ctx = (tpAniSirGlobal)arg; + if (!mac_ctx) { + sme_err("invalid mac_ctx"); return; } @@ -748,25 +795,40 @@ static void sme_rrm_scan_event_callback(struct wlan_objmgr_vdev *vdev, if (success) scan_status = eCSR_SCAN_SUCCESS; - sme_rrm_scan_request_callback(hal_handle, session_id, - scan_id, scan_status); + for (i = 0; i < MAX_MEASUREMENT_REQUEST; i++) { + smerrmctx = &mac_ctx->rrm.rrmSmeContext[i]; + if (smerrmctx->scan_id == scan_id) + break; + + if (i == (MAX_MEASUREMENT_REQUEST - 1)) + return; + } + + sme_debug("Scan completed for scan_id:%d measurement_idx:%d", + scan_id, smerrmctx->measurement_idx); + sme_rrm_scan_request_callback(mac_ctx, smerrmctx, session_id, + scan_id, scan_status); } /** * sme_rrm_issue_scan_req() - To issue rrm scan request * @mac_ctx: pointer to mac context + * @idx: Measurement Index * * This routine is called to issue rrm scan request * * Return: QDF_STATUS */ -static QDF_STATUS sme_rrm_issue_scan_req(tpAniSirGlobal mac_ctx) +static QDF_STATUS +sme_rrm_issue_scan_req(tpAniSirGlobal mac_ctx, uint8_t idx) { QDF_STATUS status = QDF_STATUS_SUCCESS; - tpRrmSMEContext sme_rrm_ctx = 
&mac_ctx->rrm.rrmSmeContext; + tpRrmSMEContext sme_rrm_ctx = &mac_ctx->rrm.rrmSmeContext[idx]; uint32_t session_id; tSirScanType scan_type; + uint8_t *chan_list; + uint8_t ch_idx; status = csr_roam_get_session_id_from_bssid(mac_ctx, &sme_rrm_ctx->sessionBssId, &session_id); @@ -779,7 +841,8 @@ static QDF_STATUS sme_rrm_issue_scan_req(tpAniSirGlobal mac_ctx) if ((sme_rrm_ctx->currentIndex) >= sme_rrm_ctx->channelList.numOfChannels) { - sme_rrm_send_beacon_report_xmit_ind(mac_ctx, NULL, true, 0); + sme_rrm_send_beacon_report_xmit_ind(mac_ctx, idx, NULL, + true, 0); sme_debug("done with the complete ch lt. finish and fee now"); goto free_ch_lst; } @@ -791,7 +854,7 @@ static QDF_STATUS sme_rrm_issue_scan_req(tpAniSirGlobal mac_ctx) scan_type = sme_rrm_ctx->measMode[0]; if ((eSIR_ACTIVE_SCAN == scan_type) || - (eSIR_PASSIVE_SCAN == scan_type)) { + (eSIR_PASSIVE_SCAN == scan_type)) { uint32_t max_chan_time; uint64_t current_time; struct scan_start_request *req; @@ -817,6 +880,10 @@ static QDF_STATUS sme_rrm_issue_scan_req(tpAniSirGlobal mac_ctx) } ucfg_scan_init_default_params(vdev, req); req->scan_req.scan_id = ucfg_scan_get_scan_id(mac_ctx->psoc); + sme_rrm_ctx->scan_id = req->scan_req.scan_id; + + sme_debug("RRM_SCN: rrm_idx:%d scan_id:%d", + sme_rrm_ctx->measurement_idx, sme_rrm_ctx->scan_id); req->scan_req.scan_f_passive = (scan_type == eSIR_ACTIVE_SCAN) ? false : true; req->scan_req.vdev_id = wlan_vdev_get_id(vdev); @@ -909,27 +976,35 @@ static QDF_STATUS sme_rrm_issue_scan_req(tpAniSirGlobal mac_ctx) * pScanResult->timer >= rrm_scan_timer */ rrm_scan_timer = 0; - if ((sme_rrm_ctx->currentIndex + 1) < - sme_rrm_ctx->channelList.numOfChannels) { - sme_rrm_send_scan_result(mac_ctx, 1, - &sme_rrm_ctx->channelList.ChannelList[ - sme_rrm_ctx->currentIndex], false); - /* Advance the current index. 
*/ - sme_rrm_ctx->currentIndex++; - sme_rrm_issue_scan_req(mac_ctx); -#ifdef FEATURE_WLAN_ESE - sme_rrm_ctx->eseBcnReqInProgress = false; -#endif - return status; - } else { - /* - * Done with the measurement. Clean up all context and - * send a message to PE with measurement done flag set. - */ - sme_rrm_send_scan_result(mac_ctx, 1, - &sme_rrm_ctx->channelList.ChannelList[ - sme_rrm_ctx->currentIndex], true); - goto free_ch_lst; + chan_list = sme_rrm_ctx->channelList.ChannelList; + if (!chan_list) { + sme_err("[802.11 RRM]: Global channel list is null"); + sme_reset_ese_bcn_req_in_progress(sme_rrm_ctx); + status = QDF_STATUS_E_FAILURE; + goto send_ind; + } + + ch_idx = sme_rrm_ctx->currentIndex; + for (; ch_idx < sme_rrm_ctx->channelList.numOfChannels; ch_idx++) { + + if ((ch_idx + 1) < + sme_rrm_ctx->channelList.numOfChannels) { + sme_rrm_send_scan_result(mac_ctx, idx, 1, + &sme_rrm_ctx->channelList.ChannelList[ + ch_idx], false); + /* Advance the current index. */ + sme_rrm_ctx->currentIndex++; + } else { + /* + * Done with the measurement. Clean up all context and + * send a message to PE with measurement done flag set. 
+ */ + sme_rrm_send_scan_result(mac_ctx, idx, 1, + &sme_rrm_ctx->channelList.ChannelList[ + ch_idx], true); + sme_reset_ese_bcn_req_in_progress(sme_rrm_ctx); + goto free_ch_lst; + } } } @@ -940,7 +1015,7 @@ static QDF_STATUS sme_rrm_issue_scan_req(tpAniSirGlobal mac_ctx) * and PE will not handle subsequent Beacon requests */ send_ind: - sme_rrm_send_beacon_report_xmit_ind(mac_ctx, NULL, true, 0); + sme_rrm_send_beacon_report_xmit_ind(mac_ctx, idx, NULL, true, 0); free_ch_lst: qdf_mem_free(sme_rrm_ctx->channelList.ChannelList); sme_rrm_ctx->channelList.ChannelList = NULL; @@ -996,13 +1071,14 @@ QDF_STATUS sme_rrm_process_beacon_report_req_ind(tpAniSirGlobal pMac, void *pMsgBuf) { tpSirBeaconReportReqInd pBeaconReq = (tpSirBeaconReportReqInd) pMsgBuf; - tpRrmSMEContext pSmeRrmContext = &pMac->rrm.rrmSmeContext; + tpRrmSMEContext pSmeRrmContext; uint32_t len = 0, i = 0; uint8_t country[WNI_CFG_COUNTRY_CODE_LEN]; uint32_t session_id; struct csr_roam_session *session; QDF_STATUS status; + pSmeRrmContext = &pMac->rrm.rrmSmeContext[pBeaconReq->measurement_idx]; status = csr_roam_get_session_id_from_bssid(pMac, (struct qdf_mac_addr *) pBeaconReq->bssId, &session_id); @@ -1026,7 +1102,8 @@ QDF_STATUS sme_rrm_process_beacon_report_req_ind(tpAniSirGlobal pMac, country[2] = OP_CLASS_GLOBAL; - sme_debug("Request Reg class %d, AP's country code %c%c 0x%x Channel %d", + sme_debug("RRM SCN: Index:%d Request Reg class %d, AP's country code %c%c 0x%x Channel %d", + pBeaconReq->measurement_idx, pBeaconReq->channelInfo.regulatoryClass, country[0], country[1], country[2], pBeaconReq->channelInfo.channelNum); @@ -1131,7 +1208,7 @@ QDF_STATUS sme_rrm_process_beacon_report_req_ind(tpAniSirGlobal pMac, pSmeRrmContext->regClass = pBeaconReq->channelInfo.regulatoryClass; pSmeRrmContext->randnIntvl = QDF_MAX(pBeaconReq->randomizationInterval, - pSmeRrmContext->rrmConfig.max_randn_interval); + pMac->rrm.rrmConfig.max_randn_interval); pSmeRrmContext->currentIndex = 0; pSmeRrmContext->msgSource 
= pBeaconReq->msgSource; qdf_mem_copy((uint8_t *) &pSmeRrmContext->measMode, @@ -1145,7 +1222,7 @@ QDF_STATUS sme_rrm_process_beacon_report_req_ind(tpAniSirGlobal pMac, pSmeRrmContext->token, pSmeRrmContext->randnIntvl, pSmeRrmContext->msgSource); - return sme_rrm_issue_scan_req(pMac); + return sme_rrm_issue_scan_req(pMac, pBeaconReq->measurement_idx); cleanup: if (pBeaconReq->msgSource == eRRM_MSG_SOURCE_11K) { @@ -1156,7 +1233,9 @@ cleanup: /* copy measurement bssid */ qdf_mem_copy(pSmeRrmContext->bssId, pBeaconReq->macaddrBssid, sizeof(tSirMacAddr)); - sme_rrm_send_beacon_report_xmit_ind(pMac, NULL, true, 0); + sme_rrm_send_beacon_report_xmit_ind(pMac, + pBeaconReq->measurement_idx, + NULL, true, 0); } return status; @@ -1191,7 +1270,7 @@ QDF_STATUS sme_rrm_neighbor_report_request(tpAniSirGlobal pMac, uint8_t /* If already a report is pending, return failure */ if (true == - pMac->rrm.rrmSmeContext.neighborReqControlInfo. + pMac->rrm.rrmSmeContext[0].neighborReqControlInfo. isNeighborRspPending) { sme_err("Neighbor request already pending.. Not allowed"); return QDF_STATUS_E_AGAIN; @@ -1204,7 +1283,7 @@ QDF_STATUS sme_rrm_neighbor_report_request(tpAniSirGlobal pMac, uint8_t } rrm_ll_purge_neighbor_cache(pMac, - &pMac->rrm.rrmSmeContext.neighborReportCache); + &pMac->rrm.rrmSmeContext[0].neighborReportCache); pMsg->messageType = eWNI_SME_NEIGHBOR_REPORT_REQ_IND; pMsg->length = sizeof(tSirNeighborReportReqInd); @@ -1220,16 +1299,17 @@ QDF_STATUS sme_rrm_neighbor_report_request(tpAniSirGlobal pMac, uint8_t /* Neighbor report request message sent successfully to PE. * Now register the callbacks */ - pMac->rrm.rrmSmeContext.neighborReqControlInfo.neighborRspCallbackInfo. - neighborRspCallback = callbackInfo->neighborRspCallback; - pMac->rrm.rrmSmeContext.neighborReqControlInfo.neighborRspCallbackInfo. - neighborRspCallbackContext = + pMac->rrm.rrmSmeContext[0].neighborReqControlInfo. 
+ neighborRspCallbackInfo.neighborRspCallback = + callbackInfo->neighborRspCallback; + pMac->rrm.rrmSmeContext[0].neighborReqControlInfo. + neighborRspCallbackInfo.neighborRspCallbackContext = callbackInfo->neighborRspCallbackContext; - pMac->rrm.rrmSmeContext.neighborReqControlInfo.isNeighborRspPending = + pMac->rrm.rrmSmeContext[0].neighborReqControlInfo.isNeighborRspPending = true; /* Start neighbor response wait timer now */ - qdf_mc_timer_start(&pMac->rrm.rrmSmeContext.neighborReqControlInfo. + qdf_mc_timer_start(&pMac->rrm.rrmSmeContext[0].neighborReqControlInfo. neighborRspWaitTimer, callbackInfo->timeout); return QDF_STATUS_SUCCESS; @@ -1322,7 +1402,9 @@ check_11r_assoc: /** * rrm_store_neighbor_rpt_by_roam_score()-store Neighbor BSS descriptor + * @pMac: Pointer to mac context * @pNeighborReportDesc - Neighbor BSS Descriptor node to be stored in cache + * @index: RRM sme context index * * This API is called to store a given * Neighbor BSS descriptor to the neighbor cache. This function @@ -1332,9 +1414,10 @@ check_11r_assoc: * Return: void. */ static void rrm_store_neighbor_rpt_by_roam_score(tpAniSirGlobal pMac, - tpRrmNeighborReportDesc pNeighborReportDesc) + tpRrmNeighborReportDesc pNeighborReportDesc, + uint8_t index) { - tpRrmSMEContext pSmeRrmContext = &pMac->rrm.rrmSmeContext; + tpRrmSMEContext pSmeRrmContext = &pMac->rrm.rrmSmeContext[0]; tListElem *pEntry; tRrmNeighborReportDesc *pTempNeighborReportDesc; @@ -1410,11 +1493,11 @@ static QDF_STATUS sme_rrm_process_neighbor_report(tpAniSirGlobal pMac, QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; /* Purge the cache on reception of unsolicited neighbor report */ - if (!pMac->rrm.rrmSmeContext.neighborReqControlInfo. - isNeighborRspPending) + if (!pMac->rrm.rrmSmeContext[pNeighborRpt->measurement_idx]. + neighborReqControlInfo.isNeighborRspPending) rrm_ll_purge_neighbor_cache(pMac, - &pMac->rrm.rrmSmeContext. - neighborReportCache); + &pMac->rrm.rrmSmeContext[pNeighborRpt->measurement_idx]. 
+ neighborReportCache); for (i = 0; i < pNeighborRpt->numNeighborReports; i++) { pNeighborReportDesc = @@ -1447,7 +1530,8 @@ static QDF_STATUS sme_rrm_process_neighbor_report(tpAniSirGlobal pMac, if (pNeighborReportDesc->roamScore > 0) { rrm_store_neighbor_rpt_by_roam_score(pMac, - pNeighborReportDesc); + pNeighborReportDesc, + pNeighborRpt->measurement_idx); } else { sme_err("Roam score of BSSID " MAC_ADDRESS_STR " is 0, Ignoring..", @@ -1462,7 +1546,9 @@ static QDF_STATUS sme_rrm_process_neighbor_report(tpAniSirGlobal pMac, } end: - if (!csr_ll_count(&pMac->rrm.rrmSmeContext.neighborReportCache)) + if (!csr_ll_count( + &pMac->rrm.rrmSmeContext[pNeighborRpt->measurement_idx]. + neighborReportCache)) qdf_status = QDF_STATUS_E_FAILURE; rrm_indicate_neighbor_report_result(pMac, qdf_status); @@ -1512,7 +1598,7 @@ QDF_STATUS sme_rrm_msg_processor(tpAniSirGlobal pMac, uint16_t msg_type, /** * rrm_iter_meas_timer_handle() - Timer handler to handlet the timeout - * @ pMac - The handle returned by mac_open. + * @data - Timer data. * * Timer handler to handlet the timeout condition when a specific BT * stop event does not come back, in which case to restore back the @@ -1520,14 +1606,23 @@ QDF_STATUS sme_rrm_msg_processor(tpAniSirGlobal pMac, uint16_t msg_type, * * Return: NULL */ -static void rrm_iter_meas_timer_handle(void *userData) +static void rrm_iter_meas_timer_handle(void *data) { - tpAniSirGlobal pMac = (tpAniSirGlobal) userData; + tpAniSirGlobal pMac; + mac_handle_t mac_handle = cds_get_context(QDF_MODULE_ID_SME); + tpRrmSMEContext sme_rrm_ctx = (tpRrmSMEContext)data; + + pMac = MAC_CONTEXT(mac_handle); + if (!pMac) { + sme_err("Mac ctx is NULL"); + return; + } sme_debug("Randomization timer expired...send on next channel"); /* Issue a scan req for next channel. 
*/ - sme_rrm_issue_scan_req(pMac); + sme_rrm_issue_scan_req(pMac, sme_rrm_ctx->measurement_idx); } + /** * rrm_neighbor_rsp_timeout_handler() - Timer handler to handlet the timeout * @pMac - The handle returned by mac_open. @@ -1547,7 +1642,7 @@ static void rrm_neighbor_rsp_timeout_handler(void *userData) /** * rrm_open() - Initialze all RRM module - * @ pMac: The handle returned by mac_open. + * @pMac: The handle returned by mac_open. * * Initialze all RRM module. * @@ -1557,45 +1652,46 @@ QDF_STATUS rrm_open(tpAniSirGlobal pMac) { QDF_STATUS qdf_status; - tpRrmSMEContext pSmeRrmContext = &pMac->rrm.rrmSmeContext; + tpRrmSMEContext pSmeRrmContext; QDF_STATUS qdf_ret_status = QDF_STATUS_SUCCESS; + uint8_t i; - pSmeRrmContext->rrmConfig.max_randn_interval = 50; /* ms */ + pMac->rrm.rrmConfig.max_randn_interval = 50; /* ms */ - qdf_status = qdf_mc_timer_init(&pSmeRrmContext->IterMeasTimer, - QDF_TIMER_TYPE_SW, - rrm_iter_meas_timer_handle, - (void *)pMac); + for (i = 0; i < MAX_MEASUREMENT_REQUEST; i++) { + pSmeRrmContext = &pMac->rrm.rrmSmeContext[i]; + qdf_status = qdf_mc_timer_init(&pSmeRrmContext->IterMeasTimer, + QDF_TIMER_TYPE_SW, + rrm_iter_meas_timer_handle, + (void *)pSmeRrmContext); - if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { + sme_err("rrm_open: Fail to init timer"); + return QDF_STATUS_E_FAILURE; + } - QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, - "rrm_open: Fail to init timer"); + qdf_status = qdf_mc_timer_init( + &pSmeRrmContext->neighborReqControlInfo. + neighborRspWaitTimer, QDF_TIMER_TYPE_SW, + rrm_neighbor_rsp_timeout_handler, + (void *)pMac); - return QDF_STATUS_E_FAILURE; - } + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { + sme_err("rrm_open: Fail to init neighbor rsp timer"); + return QDF_STATUS_E_FAILURE; + } - qdf_status = - qdf_mc_timer_init(&pSmeRrmContext->neighborReqControlInfo. 
- neighborRspWaitTimer, QDF_TIMER_TYPE_SW, - rrm_neighbor_rsp_timeout_handler, - (void *)pMac); + pSmeRrmContext->measurement_idx = i; + pSmeRrmContext->neighborReqControlInfo.isNeighborRspPending = + false; - if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { - - QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, - "rrm_open: Fail to init timer"); - - return QDF_STATUS_E_FAILURE; - } - - pSmeRrmContext->neighborReqControlInfo.isNeighborRspPending = false; - - qdf_ret_status = csr_ll_open(&pSmeRrmContext->neighborReportCache); - if (QDF_STATUS_SUCCESS != qdf_ret_status) { - QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, - "rrm_open: Fail to open neighbor cache result"); - return QDF_STATUS_E_FAILURE; + qdf_ret_status = + csr_ll_open(&pSmeRrmContext->neighborReportCache); + if (QDF_STATUS_SUCCESS != qdf_ret_status) { + QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, + "rrm_open: Fail to open neighbor cache result"); + return QDF_STATUS_E_FAILURE; + } } return QDF_STATUS_SUCCESS; @@ -1616,56 +1712,52 @@ QDF_STATUS rrm_close(tpAniSirGlobal pMac) { QDF_STATUS qdf_status = QDF_STATUS_SUCCESS; - tpRrmSMEContext pSmeRrmContext = &pMac->rrm.rrmSmeContext; + tpRrmSMEContext pSmeRrmContext; + uint8_t i; - if (QDF_TIMER_STATE_RUNNING == - qdf_mc_timer_get_current_state(&pSmeRrmContext->IterMeasTimer)) { - qdf_status = qdf_mc_timer_stop(&pSmeRrmContext->IterMeasTimer); - if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { - QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, - FL("Timer stop fail")); + for (i = 0; i < MAX_MEASUREMENT_REQUEST; i++) { + pSmeRrmContext = &pMac->rrm.rrmSmeContext[i]; + if (QDF_TIMER_STATE_RUNNING == + qdf_mc_timer_get_current_state(&pSmeRrmContext->IterMeasTimer)) { + qdf_status = qdf_mc_timer_stop( + &pSmeRrmContext->IterMeasTimer); + if (QDF_IS_STATUS_ERROR(qdf_status)) + sme_err("Timer stop fail"); } - } - if (pSmeRrmContext->channelList.ChannelList) { - qdf_mem_free(pSmeRrmContext->channelList.ChannelList); - pSmeRrmContext->channelList.ChannelList = 
NULL; - } - - qdf_status = qdf_mc_timer_destroy(&pSmeRrmContext->IterMeasTimer); - if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { - - QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, - FL("Fail to destroy timer")); - - } - - if (QDF_TIMER_STATE_RUNNING == - qdf_mc_timer_get_current_state(&pSmeRrmContext-> - neighborReqControlInfo. - neighborRspWaitTimer)) { - qdf_status = qdf_mc_timer_stop(&pSmeRrmContext-> - neighborReqControlInfo. - neighborRspWaitTimer); - if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { - QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, - FL("Timer stop fail")); + if (pSmeRrmContext->channelList.ChannelList) { + qdf_mem_free(pSmeRrmContext->channelList.ChannelList); + pSmeRrmContext->channelList.ChannelList = NULL; + pSmeRrmContext->channelList.numOfChannels = 0; } + + qdf_status = + qdf_mc_timer_destroy(&pSmeRrmContext->IterMeasTimer); + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) + sme_err("Fail to destroy timer"); + + if (QDF_TIMER_STATE_RUNNING == + qdf_mc_timer_get_current_state(&pSmeRrmContext-> + neighborReqControlInfo. + neighborRspWaitTimer)) { + qdf_status = qdf_mc_timer_stop(&pSmeRrmContext-> + neighborReqControlInfo. + neighborRspWaitTimer); + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) + sme_err("Timer stop fail"); + } + + qdf_status = qdf_mc_timer_destroy(&pSmeRrmContext-> + neighborReqControlInfo. + neighborRspWaitTimer); + if (!QDF_IS_STATUS_SUCCESS(qdf_status)) + sme_err("Fail to destroy timer"); + + rrm_ll_purge_neighbor_cache(pMac, + &pSmeRrmContext->neighborReportCache); + csr_ll_close(&pSmeRrmContext->neighborReportCache); } - qdf_status = - qdf_mc_timer_destroy(&pSmeRrmContext->neighborReqControlInfo. 
- neighborRspWaitTimer); - if (!QDF_IS_STATUS_SUCCESS(qdf_status)) { - QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR, - FL("Fail to destroy timer")); - - } - - rrm_ll_purge_neighbor_cache(pMac, &pSmeRrmContext->neighborReportCache); - - csr_ll_close(&pSmeRrmContext->neighborReportCache); - return qdf_status; } @@ -1681,7 +1773,7 @@ QDF_STATUS rrm_close(tpAniSirGlobal pMac) QDF_STATUS rrm_change_default_config_param(tpAniSirGlobal pMac, struct rrm_config_param *rrm_config) { - qdf_mem_copy(&pMac->rrm.rrmSmeContext.rrmConfig, rrm_config, + qdf_mem_copy(&pMac->rrm.rrmConfig, rrm_config, sizeof(struct rrm_config_param)); return QDF_STATUS_SUCCESS; @@ -1689,22 +1781,36 @@ QDF_STATUS rrm_change_default_config_param(tpAniSirGlobal pMac, QDF_STATUS rrm_start(tpAniSirGlobal mac_ctx) { - tpRrmSMEContext smerrmctx = &mac_ctx->rrm.rrmSmeContext; + tpRrmSMEContext smerrmctx; + wlan_scan_requester req_id; + uint8_t i; /* Register with scan component */ - smerrmctx->req_id = ucfg_scan_register_requester(mac_ctx->psoc, - "RRM", - sme_rrm_scan_event_callback, - smerrmctx); + req_id = ucfg_scan_register_requester(mac_ctx->psoc, + "RRM", + sme_rrm_scan_event_callback, + mac_ctx); + + for (i = 0; i < MAX_MEASUREMENT_REQUEST; i++) { + smerrmctx = &mac_ctx->rrm.rrmSmeContext[i]; + smerrmctx->req_id = req_id; + } return QDF_STATUS_SUCCESS; } QDF_STATUS rrm_stop(tpAniSirGlobal mac_ctx) { - tpRrmSMEContext smerrmctx = &mac_ctx->rrm.rrmSmeContext; + tpRrmSMEContext smerrmctx; + wlan_scan_requester req_id; + uint8_t i; - ucfg_scan_unregister_requester(mac_ctx->psoc, smerrmctx->req_id); + for (i = 0; i < MAX_MEASUREMENT_REQUEST; i++) { + smerrmctx = &mac_ctx->rrm.rrmSmeContext[i]; + req_id = smerrmctx->req_id; + smerrmctx->req_id = 0; + } + ucfg_scan_unregister_requester(mac_ctx->psoc, req_id); return QDF_STATUS_SUCCESS; } diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma_api.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma_api.h index 2565e4f23921..8fef24e2a671 100644 --- 
a/drivers/staging/qcacld-3.0/core/wma/inc/wma_api.h +++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma_api.h @@ -415,4 +415,12 @@ void wma_wmi_stop(void); */ uint8_t wma_get_mcs_idx(uint16_t max_rate, uint8_t rate_flags, uint8_t *nss, uint8_t *mcs_rate_flag); + +/** + * wma_get_vdev_chan_roam_enabled() -get roam enabled vdev channel. + * @wma_handle: pointer to wma handle. + * + * Return: roam enabled vdev channel + */ +uint8_t wma_get_vdev_chan_roam_enabled(WMA_HANDLE wma_handle); #endif diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma_tgt_cfg.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma_tgt_cfg.h index a1f38149e898..3762b8a7a082 100644 --- a/drivers/staging/qcacld-3.0/core/wma/inc/wma_tgt_cfg.h +++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma_tgt_cfg.h @@ -78,6 +78,7 @@ struct wma_tgt_services { bool bcn_reception_stats; uint32_t akm_service_bitmap; bool is_adaptive_11r_roam_supported; + bool stop_all_host_scan_support; bool is_roam_scan_ch_to_host; }; diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c index e6a2280115cb..38f025e5bb21 100644 --- a/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c +++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c @@ -92,10 +92,7 @@ #include "init_cmd_api.h" #include "wma_coex.h" #include - -#ifdef WLAN_FEATURE_PKT_CAPTURE #include "wlan_pkt_capture_ucfg_api.h" -#endif #define WMA_LOG_COMPLETION_TIMER 3000 /* 3 seconds */ #define WMI_TLV_HEADROOM 128 @@ -5117,6 +5114,10 @@ static inline void wma_update_target_services(struct wmi_unified *wmi_handle, if (wmi_service_enabled(wmi_handle, wmi_service_adaptive_11r_support)) cfg->is_adaptive_11r_roam_supported = true; + if (wmi_service_enabled(wmi_handle, + wmi_service_host_scan_stop_vdev_all)) + cfg->stop_all_host_scan_support = true; + if (wmi_service_enabled(wmi_handle, wmi_service_twt_requestor)) cfg->twt_requestor = true; if (wmi_service_enabled(wmi_handle, wmi_service_twt_responder)) @@ -5128,6 
+5129,8 @@ static inline void wma_update_target_services(struct wmi_unified *wmi_handle, if (wmi_service_enabled(wmi_handle, wmi_roam_scan_chan_list_to_host_support)) cfg->is_roam_scan_ch_to_host = true; + if (wmi_service_enabled(wmi_handle, wmi_service_suiteb_roam_support)) + cfg->akm_service_bitmap |= (1 << AKM_SUITEB); } /** @@ -6967,6 +6970,13 @@ int wma_rx_service_ready_ext_event(void *handle, uint8_t *event, wma_get_separate_iface_support(wma_handle)) wlan_res_cfg->nan_separate_iface_support = true; + if (ucfg_pkt_capture_get_mode(wma_handle->psoc) && + wmi_service_enabled(wmi_handle, + wmi_service_packet_capture_support)) + wlan_res_cfg->pktcapture_support = true; + else + wlan_res_cfg->pktcapture_support = false; + return 0; } diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c index c86be6d4f277..b6478258f184 100644 --- a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c +++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c @@ -1809,14 +1809,10 @@ QDF_STATUS wma_process_roaming_config(tp_wma_handle wma_handle, break; mode = WMI_ROAM_SCAN_MODE_PERIODIC; - /* Don't use rssi triggered roam scans if external app - * is in control of channel list. - */ - if (roam_req->ChannelCacheType != CHANNEL_LIST_STATIC || - roam_req->roam_force_rssi_trigger) + if (roam_req->roam_force_rssi_trigger) mode |= WMI_ROAM_SCAN_MODE_RSSI_CHANGE; - } else { + } else if (roam_req->roam_force_rssi_trigger) { mode = WMI_ROAM_SCAN_MODE_RSSI_CHANGE; } @@ -2117,14 +2113,10 @@ QDF_STATUS wma_process_roaming_config(tp_wma_handle wma_handle, break; mode = WMI_ROAM_SCAN_MODE_PERIODIC; - /* Don't use rssi triggered roam scans if external app - * is in control of channel list. 
- */ - if (roam_req->ChannelCacheType != CHANNEL_LIST_STATIC || - roam_req->roam_force_rssi_trigger) + if (roam_req->roam_force_rssi_trigger) mode |= WMI_ROAM_SCAN_MODE_RSSI_CHANGE; - } else { + } else if (roam_req->roam_force_rssi_trigger) { mode = WMI_ROAM_SCAN_MODE_RSSI_CHANGE; } @@ -3985,7 +3977,7 @@ QDF_STATUS wma_roam_scan_fill_self_caps(tp_wma_handle wma_handle, if (val) selfCaps.apsd = 1; - selfCaps.rrm = pMac->rrm.rrmSmeContext.rrmConfig.rrm_enabled; + selfCaps.rrm = pMac->rrm.rrmConfig.rrm_enabled; if (wlan_cfg_get_int(pMac, WNI_CFG_BLOCK_ACK_ENABLED, &val) != QDF_STATUS_SUCCESS) { diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c index 5d617500e39b..a9504c7c0360 100644 --- a/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c +++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c @@ -5141,4 +5141,17 @@ int wma_oem_event_handler(void *wma_ctx, uint8_t *event_buff, uint32_t len) return QDF_STATUS_SUCCESS; } + +uint8_t wma_get_vdev_chan_roam_enabled(WMA_HANDLE wma_handle) +{ + uint8_t id; + tp_wma_handle wma = (tp_wma_handle)wma_handle; + + for (id = 0; id < wma->max_bssid; id++) { + if (wma->interfaces[id].roam_offload_enabled) + return wma->interfaces[id].channel; + } + + return 0; +} #endif diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c index f485f541e36d..d6de62ee681e 100644 --- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c +++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c @@ -1904,12 +1904,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len); if (pIE == NULL) return _FAIL; + if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates)) + return _FAIL; memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len); supportRateNum = ie_len; pIE = (struct ndis_80211_var_ie 
*)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len); - if (pIE) + if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum)) memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len); return _SUCCESS; diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index 663cbe3669e1..d52221ae1b85 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -88,8 +88,6 @@ static LIST_HEAD(hvc_structs); */ static DEFINE_SPINLOCK(hvc_structs_lock); -/* Mutex to serialize hvc_open */ -static DEFINE_MUTEX(hvc_open_mutex); /* * This value is used to assign a tty->index value to a hvc_struct based * upon order of exposure via hvc_probe(), when we can not match it to @@ -334,24 +332,16 @@ static int hvc_install(struct tty_driver *driver, struct tty_struct *tty) */ static int hvc_open(struct tty_struct *tty, struct file * filp) { - struct hvc_struct *hp; + struct hvc_struct *hp = tty->driver_data; unsigned long flags; int rc = 0; - mutex_lock(&hvc_open_mutex); - - hp = tty->driver_data; - if (!hp) { - rc = -EIO; - goto out; - } - spin_lock_irqsave(&hp->port.lock, flags); /* Check and then increment for fast path open. 
*/ if (hp->port.count++ > 0) { spin_unlock_irqrestore(&hp->port.lock, flags); hvc_kick(); - goto out; + return 0; } /* else count == 0 */ spin_unlock_irqrestore(&hp->port.lock, flags); @@ -379,8 +369,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) /* Force wakeup of the polling thread */ hvc_kick(); -out: - mutex_unlock(&hvc_open_mutex); return rc; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 4067f079b08d..0de467c8593d 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1734,6 +1734,8 @@ static int acm_pre_reset(struct usb_interface *intf) static const struct usb_device_id acm_ids[] = { /* quirky and broken devices */ + { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */ + .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */ { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 7d3130b0209e..f3bbcbd708ae 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -73,11 +73,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Logitech HD Webcam C270 */ { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc2/gadget.c 
b/drivers/usb/dwc2/gadget.c index aeb6f7c84ea0..03bc479d04e0 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -4723,12 +4723,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) epnum, 0); } - ret = usb_add_gadget_udc(dev, &hsotg->gadget); - if (ret) { - dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, - hsotg->ctrl_req); - return ret; - } dwc2_hsotg_dump(hsotg); return 0; diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index daf0d37acb37..c8ac0391e65f 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -452,6 +452,17 @@ static int dwc2_driver_probe(struct platform_device *dev) if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) dwc2_lowlevel_hw_disable(hsotg); +#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) + /* Postponed adding a new gadget to the udc class driver list */ + if (hsotg->gadget_enabled) { + retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget); + if (retval) { + dwc2_hsotg_remove(hsotg); + goto error; + } + } +#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ return 0; error: diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index 4103bf7cf52a..62fad60d0c06 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2317,7 +2317,8 @@ static int mv_udc_probe(struct platform_device *pdev) return 0; err_create_workqueue: - destroy_workqueue(udc->qwork); + if (udc->qwork) + destroy_workqueue(udc->qwork); err_destroy_dma: dma_pool_destroy(udc->dtd_pool); err_free_dma: diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c index 26b641100639..be72a625dc21 100644 --- a/drivers/usb/host/ehci-exynos.c +++ b/drivers/usb/host/ehci-exynos.c @@ -199,9 +199,8 @@ skip_phy: hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) 
{ + err = irq; goto fail_io; } diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 93326974ff4b..265c9af1d2b5 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -229,6 +229,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci_info(ehci, "applying MosChip frame-index workaround\n"); ehci->frame_index_bug = 1; break; + case PCI_VENDOR_ID_HUAWEI: + /* Synopsys HC bug */ + if (pdev->device == 0xa239) { + ehci_info(ehci, "applying Synopsys HC workaround\n"); + ehci->has_synopsys_hc_bug = 1; + } + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c index d4e0f7cd96fa..b5592fb518e3 100644 --- a/drivers/usb/host/ohci-sm501.c +++ b/drivers/usb/host/ohci-sm501.c @@ -195,6 +195,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev) struct resource *mem; usb_remove_hcd(hcd); + iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); dma_release_declared_memory(&pdev->dev); diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 35aecbcac6f7..945e108cffb8 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -726,6 +726,9 @@ static int xhci_mtk_remove(struct platform_device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct usb_hcd *shared_hcd = xhci->shared_hcd; + pm_runtime_put_noidle(&dev->dev); + pm_runtime_disable(&dev->dev); + usb_remove_hcd(shared_hcd); xhci->shared_hcd = NULL; xhci_mtk_phy_power_off(mtk); @@ -738,8 +741,6 @@ static int xhci_mtk_remove(struct platform_device *dev) xhci_mtk_sch_exit(mtk); xhci_mtk_clks_disable(mtk); xhci_mtk_ldos_disable(mtk); - pm_runtime_put_sync(&dev->dev); - pm_runtime_disable(&dev->dev); return 0; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 67adfc973dc2..b713f9e7768b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1381,6 +1381,7 @@ static int 
xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, xhci->devs[slot_id]->out_ctx, ep_index); ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); + ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */ ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); @@ -4260,6 +4261,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, mutex_lock(hcd->bandwidth_mutex); xhci_change_max_exit_latency(xhci, udev, 0); mutex_unlock(hcd->bandwidth_mutex); + readl_poll_timeout(port_array[port_num], pm_val, + (pm_val & PORT_PLS_MASK) == XDEV_U0, + 100, 10000); return 0; } } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 76c839717f16..2d7706d0f54d 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -718,7 +718,7 @@ struct xhci_ep_ctx { * 4 - TRB error * 5-7 - reserved */ -#define EP_STATE_MASK (0xf) +#define EP_STATE_MASK (0x7) #define EP_STATE_DISABLED 0 #define EP_STATE_RUNNING 1 #define EP_STATE_HALTED 2 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6d61dcd0e16e..cb797699f614 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8948,9 +8948,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) dio_data.overwrite = 1; inode_unlock(inode); relock = true; - } else if (iocb->ki_flags & IOCB_NOWAIT) { - ret = -EAGAIN; - goto out; } ret = btrfs_delalloc_reserve_space(inode, &data_reserved, offset, count); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 951c444d83e7..b46fdb2b8d34 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1755,6 +1755,12 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, inode = d_inode(cfile->dentry); cifsi = CIFS_I(inode); + /* + * We zero the range through ioctl, so we need remove the page caches + * first, otherwise the data may be inconsistent with the server. 
+ */ + truncate_pagecache_range(inode, offset, offset + len - 1); + /* if file not oplocked can't be sure whether asking to extend size */ if (!CIFS_CACHE_READ(cifsi)) if (keep_size == false) { @@ -1824,6 +1830,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon, return rc; } + /* + * We implement the punch hole through ioctl, so we need remove the page + * caches first, otherwise the data may be inconsistent with the server. + */ + truncate_pagecache_range(inode, offset, offset + len - 1); + cifs_dbg(FYI, "offset %lld len %lld", offset, len); fsctl_buf.FileOffset = cpu_to_le64(offset); diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 01004e7b0657..afa576b6bf68 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -50,6 +50,13 @@ bool f2fs_is_compressed_page(struct page *page) return false; if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page)) return false; + /* + * page->private may be set with pid. + * pid_max is enough to check if it is traced. 
+ */ + if (IS_IO_TRACED_PAGE(page)) + return false; + f2fs_bug_on(F2FS_M_SB(page->mapping), *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC); return true; @@ -507,7 +514,7 @@ bool f2fs_is_compress_backend_ready(struct inode *inode) return f2fs_cops[F2FS_I(inode)->i_compress_algorithm]; } -static mempool_t *compress_page_pool = NULL; +static mempool_t *compress_page_pool; static int num_compress_pages = 512; module_param(num_compress_pages, uint, 0444); MODULE_PARM_DESC(num_compress_pages, @@ -818,7 +825,7 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc) } /* return # of valid blocks in compressed cluster */ -static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr) +static int f2fs_cluster_blocks(struct compress_ctx *cc) { return __f2fs_cluster_blocks(cc, false); } @@ -832,7 +839,7 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size, }; - return f2fs_cluster_blocks(&cc, false); + return f2fs_cluster_blocks(&cc); } static bool cluster_may_compress(struct compress_ctx *cc) @@ -883,7 +890,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, bool prealloc; retry: - ret = f2fs_cluster_blocks(cc, false); + ret = f2fs_cluster_blocks(cc); if (ret <= 0) return ret; @@ -946,7 +953,7 @@ retry: } if (prealloc) { - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); set_new_dnode(&dn, cc->inode, NULL, NULL, 0); @@ -961,7 +968,7 @@ retry: break; } - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); } if (likely(!ret)) { @@ -1093,8 +1100,16 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, loff_t psize; int i, err; - if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi)) + if (IS_NOQUOTA(inode)) { + /* + * We need to wait for node_write to avoid block allocation during + * checkpoint. 
This can only happen to quota writes which can cause + * the below discard race condition. + */ + down_read(&sbi->node_write); + } else if (!f2fs_trylock_op(sbi)) { return -EAGAIN; + } set_new_dnode(&dn, cc->inode, NULL, NULL, 0); @@ -1200,7 +1215,9 @@ unlock_continue: set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN); f2fs_put_dnode(&dn); - if (!IS_NOQUOTA(inode)) + if (IS_NOQUOTA(inode)) + up_read(&sbi->node_write); + else f2fs_unlock_op(sbi); spin_lock(&fi->i_size_lock); @@ -1227,7 +1244,9 @@ out_put_cic: out_put_dnode: f2fs_put_dnode(&dn); out_unlock_op: - if (!IS_NOQUOTA(inode)) + if (IS_NOQUOTA(inode)) + up_read(&sbi->node_write); + else f2fs_unlock_op(sbi); return -EAGAIN; } @@ -1307,6 +1326,12 @@ retry_write: congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); lock_page(cc->rpages[i]); + + if (!PageDirty(cc->rpages[i])) { + unlock_page(cc->rpages[i]); + continue; + } + clear_page_dirty_for_io(cc->rpages[i]); goto retry_write; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index b0b55b70da9e..96c025c6e2ba 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -968,12 +968,13 @@ static void f2fs_release_read_bio(struct bio *bio) /* This can handle encryption stuffs */ static int f2fs_submit_page_read(struct inode *inode, struct page *page, - block_t blkaddr, bool for_write) + block_t blkaddr, int op_flags, bool for_write) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct bio *bio; - bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, for_write); + bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags, + for_write); if (IS_ERR(bio)) return PTR_ERR(bio); @@ -1163,7 +1164,8 @@ got_it: return page; } - err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, for_write); + err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, + op_flags, for_write); if (err) goto put_err; return page; @@ -1312,7 +1314,7 @@ alloc: set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); old_blkaddr = dn->data_blkaddr; f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr, - 
&sum, seg_type, NULL, false); + &sum, seg_type, NULL); if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) invalidate_mapping_pages(META_MAPPING(sbi), old_blkaddr, old_blkaddr); @@ -1372,7 +1374,7 @@ map_blocks: return err; } -void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock) +void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock) { if (flag == F2FS_GET_BLOCK_PRE_AIO) { if (lock) @@ -1437,7 +1439,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, next_dnode: if (map->m_may_create) - __do_map_lock(sbi, flag, true); + f2fs_do_map_lock(sbi, flag, true); /* When reading holes, we need its node page */ set_new_dnode(&dn, inode, NULL, NULL, 0); @@ -1586,7 +1588,7 @@ skip: f2fs_put_dnode(&dn); if (map->m_may_create) { - __do_map_lock(sbi, flag, false); + f2fs_do_map_lock(sbi, flag, false); f2fs_balance_fs(sbi, dn.node_changed); } goto next_dnode; @@ -1612,7 +1614,7 @@ sync_out: f2fs_put_dnode(&dn); unlock_out: if (map->m_may_create) { - __do_map_lock(sbi, flag, false); + f2fs_do_map_lock(sbi, flag, false); f2fs_balance_fs(sbi, dn.node_changed); } out: @@ -1760,6 +1762,7 @@ static int f2fs_xattr_fiemap(struct inode *inode, flags |= FIEMAP_EXTENT_LAST; err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); + trace_f2fs_fiemap(inode, 0, phys, len, flags, err); if (err || err == 1) return err; } @@ -1783,8 +1786,10 @@ static int f2fs_xattr_fiemap(struct inode *inode, flags = FIEMAP_EXTENT_LAST; } - if (phys) + if (phys) { err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); + trace_f2fs_fiemap(inode, 0, phys, len, flags, err); + } return (err < 0 ? 
err : 0); } @@ -1878,6 +1883,7 @@ next: ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); + trace_f2fs_fiemap(inode, logical, phys, size, flags, ret); if (ret) goto out; size = 0; @@ -2235,6 +2241,7 @@ int f2fs_mpage_readpages(struct address_space *mapping, #endif unsigned max_nr_pages = nr_pages; int ret = 0; + bool drop_ra = false; map.m_pblk = 0; map.m_lblk = 0; @@ -2245,13 +2252,25 @@ int f2fs_mpage_readpages(struct address_space *mapping, map.m_seg_type = NO_CHECK_TYPE; map.m_may_create = false; + /* + * Two readahead threads for same address range can cause race condition + * which fragments sequential read IOs. So let's avoid each other. + */ + if (pages && is_readahead) { + page = list_last_entry(pages, struct page, lru); + if (READ_ONCE(F2FS_I(inode)->ra_offset) == page_index(page)) + drop_ra = true; + else + WRITE_ONCE(F2FS_I(inode)->ra_offset, page_index(page)); + } + for (; nr_pages; nr_pages--) { if (pages) { page = list_last_entry(pages, struct page, lru); prefetchw(&page->flags); list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, + if (drop_ra || add_to_page_cache_lru(page, mapping, page_index(page), readahead_gfp_mask(mapping))) goto next_page; @@ -2316,6 +2335,9 @@ next_page: BUG_ON(pages && !list_empty(pages)); if (bio) __submit_bio(F2FS_I_SB(inode), bio, DATA); + + if (pages && is_readahead && !drop_ra) + WRITE_ONCE(F2FS_I(inode)->ra_offset, -1); return pages ? 0 : ret; } @@ -2673,8 +2695,20 @@ write: /* Dentry/quota blocks are controlled by checkpoint */ if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) { + /* + * We need to wait for node_write to avoid block allocation during + * checkpoint. This can only happen to quota writes which can cause + * the below discard race condition. 
+ */ + if (IS_NOQUOTA(inode)) + down_read(&sbi->node_write); + fio.need_lock = LOCK_DONE; err = f2fs_do_write_data_page(&fio); + + if (IS_NOQUOTA(inode)) + up_read(&sbi->node_write); + goto done; } @@ -3173,7 +3207,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi, if (f2fs_has_inline_data(inode) || (pos & PAGE_MASK) >= i_size_read(inode)) { - __do_map_lock(sbi, flag, true); + f2fs_do_map_lock(sbi, flag, true); locked = true; } @@ -3210,7 +3244,7 @@ restart: err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); if (err || dn.data_blkaddr == NULL_ADDR) { f2fs_put_dnode(&dn); - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO); locked = true; @@ -3226,7 +3260,7 @@ out: f2fs_put_dnode(&dn); unlock_out: if (locked) - __do_map_lock(sbi, flag, false); + f2fs_do_map_lock(sbi, flag, false); return err; } @@ -3337,7 +3371,7 @@ repeat: err = -EFSCORRUPTED; goto fail; } - err = f2fs_submit_page_read(inode, page, blkaddr, true); + err = f2fs_submit_page_read(inode, page, blkaddr, 0, true); if (err) goto fail; @@ -3645,10 +3679,9 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block) } f2fs_put_dnode(&dn); - return blknr; #else - return -EOPNOTSUPP; + return 0; #endif } @@ -3656,18 +3689,26 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block) static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; + struct buffer_head tmp = { + .b_size = i_blocksize(inode), + }; + sector_t blknr = 0; if (f2fs_has_inline_data(inode)) - return 0; + goto out; /* make sure allocating whole blocks */ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) filemap_write_and_wait(mapping); if (f2fs_compressed_file(inode)) - return f2fs_bmap_compress(inode, block); + blknr = f2fs_bmap_compress(inode, block); - return generic_block_bmap(mapping, block, get_data_block_bmap); + if (!get_data_block_bmap(inode, block, 
&tmp, 0)) + blknr = tmp.b_blocknr; +out: + trace_f2fs_bmap(inode, block, blknr); + return blknr; } #ifdef CONFIG_MIGRATION diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 05e9ad91167e..2766abe0c300 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -174,6 +174,26 @@ static void update_general_status(struct f2fs_sb_info *sbi) for (i = META_CP; i < META_MAX; i++) si->meta_count[i] = atomic_read(&sbi->meta_count[i]); + for (i = 0; i < NO_CHECK_TYPE; i++) { + si->dirty_seg[i] = 0; + si->full_seg[i] = 0; + si->valid_blks[i] = 0; + } + + for (i = 0; i < MAIN_SEGS(sbi); i++) { + int blks = get_seg_entry(sbi, i)->valid_blocks; + int type = get_seg_entry(sbi, i)->type; + + if (!blks) + continue; + + if (blks == sbi->blocks_per_seg) + si->full_seg[type]++; + else + si->dirty_seg[type]++; + si->valid_blks[type] += blks; + } + for (i = 0; i < 2; i++) { si->segment_count[i] = sbi->segment_count[i]; si->block_count[i] = sbi->block_count[i]; @@ -329,30 +349,50 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n", si->main_area_segs, si->main_area_sections, si->main_area_zones); - seq_printf(s, " - COLD data: %d, %d, %d\n", + seq_printf(s, " TYPE %8s %8s %8s %10s %10s %10s\n", + "segno", "secno", "zoneno", "dirty_seg", "full_seg", "valid_blk"); + seq_printf(s, " - COLD data: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_COLD_DATA], si->cursec[CURSEG_COLD_DATA], - si->curzone[CURSEG_COLD_DATA]); - seq_printf(s, " - WARM data: %d, %d, %d\n", + si->curzone[CURSEG_COLD_DATA], + si->dirty_seg[CURSEG_COLD_DATA], + si->full_seg[CURSEG_COLD_DATA], + si->valid_blks[CURSEG_COLD_DATA]); + seq_printf(s, " - WARM data: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_WARM_DATA], si->cursec[CURSEG_WARM_DATA], - si->curzone[CURSEG_WARM_DATA]); - seq_printf(s, " - HOT data: %d, %d, %d\n", + si->curzone[CURSEG_WARM_DATA], + si->dirty_seg[CURSEG_WARM_DATA], + si->full_seg[CURSEG_WARM_DATA], + si->valid_blks[CURSEG_WARM_DATA]); + 
seq_printf(s, " - HOT data: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_HOT_DATA], si->cursec[CURSEG_HOT_DATA], - si->curzone[CURSEG_HOT_DATA]); - seq_printf(s, " - Dir dnode: %d, %d, %d\n", + si->curzone[CURSEG_HOT_DATA], + si->dirty_seg[CURSEG_HOT_DATA], + si->full_seg[CURSEG_HOT_DATA], + si->valid_blks[CURSEG_HOT_DATA]); + seq_printf(s, " - Dir dnode: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_HOT_NODE], si->cursec[CURSEG_HOT_NODE], - si->curzone[CURSEG_HOT_NODE]); - seq_printf(s, " - File dnode: %d, %d, %d\n", + si->curzone[CURSEG_HOT_NODE], + si->dirty_seg[CURSEG_HOT_NODE], + si->full_seg[CURSEG_HOT_NODE], + si->valid_blks[CURSEG_HOT_NODE]); + seq_printf(s, " - File dnode: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_WARM_NODE], si->cursec[CURSEG_WARM_NODE], - si->curzone[CURSEG_WARM_NODE]); - seq_printf(s, " - Indir nodes: %d, %d, %d\n", + si->curzone[CURSEG_WARM_NODE], + si->dirty_seg[CURSEG_WARM_NODE], + si->full_seg[CURSEG_WARM_NODE], + si->valid_blks[CURSEG_WARM_NODE]); + seq_printf(s, " - Indir nodes: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_COLD_NODE], si->cursec[CURSEG_COLD_NODE], - si->curzone[CURSEG_COLD_NODE]); + si->curzone[CURSEG_COLD_NODE], + si->dirty_seg[CURSEG_COLD_NODE], + si->full_seg[CURSEG_COLD_NODE], + si->valid_blks[CURSEG_COLD_NODE]); seq_printf(s, "\n - Valid: %d\n - Dirty: %d\n", si->main_area_segs - si->dirty_count - si->prefree_count - si->free_segs, diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 7402df261a0f..139cf01868ba 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -779,7 +779,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name, return err; /* - * An immature stakable filesystem shows a race condition between lookup + * An immature stackable filesystem shows a race condition between lookup * and create. If we have same task when doing lookup and create, it's * definitely fine as expected by VFS normally. 
Otherwise, let's just * verify on-disk dentry one more time, which guarantees filesystem diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index e60078460ad1..686c68b98610 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -325,9 +325,10 @@ static void __drop_largest_extent(struct extent_tree *et, } /* return true, if inode page is changed */ -static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext) +static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL; struct extent_tree *et; struct extent_node *en; struct extent_info ei; @@ -335,16 +336,18 @@ static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_e if (!f2fs_may_extent_tree(inode)) { /* drop largest extent */ if (i_ext && i_ext->len) { + f2fs_wait_on_page_writeback(ipage, NODE, true, true); i_ext->len = 0; - return true; + set_page_dirty(ipage); + return; } - return false; + return; } et = __grab_extent_tree(inode); if (!i_ext || !i_ext->len) - return false; + return; get_extent_info(&ei, i_ext); @@ -360,17 +363,14 @@ static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_e } out: write_unlock(&et->lock); - return false; } -bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext) +void f2fs_init_extent_tree(struct inode *inode, struct page *ipage) { - bool ret = __f2fs_init_extent_tree(inode, i_ext); + __f2fs_init_extent_tree(inode, ipage); if (!F2FS_I(inode)->extent_tree) set_inode_flag(inode, FI_NO_EXTENT); - - return ret; } static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 5ff675e6e0d6..a11c32b368df 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -434,6 +434,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, _IOR(F2FS_IOCTL_MAGIC, 18, __u64) #define 
F2FS_IOC_RESERVE_COMPRESS_BLOCKS \ _IOR(F2FS_IOCTL_MAGIC, 19, __u64) +#define F2FS_IOC_SEC_TRIM_FILE _IOW(F2FS_IOCTL_MAGIC, 20, \ + struct f2fs_sectrim_range) #define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY #define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY @@ -450,6 +452,13 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, #define F2FS_GOING_DOWN_METAFLUSH 0x3 /* going down with meta flush */ #define F2FS_GOING_DOWN_NEED_FSCK 0x4 /* going down to trigger fsck */ +/* + * Flags used by F2FS_IOC_SEC_TRIM_FILE + */ +#define F2FS_TRIM_FILE_DISCARD 0x1 /* send discard command */ +#define F2FS_TRIM_FILE_ZEROOUT 0x2 /* zero out */ +#define F2FS_TRIM_FILE_MASK 0x3 + #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* * ioctl commands in 32 bit emulation @@ -485,6 +494,12 @@ struct f2fs_flush_device { u32 segments; /* # of segments to flush */ }; +struct f2fs_sectrim_range { + u64 start; + u64 len; + u64 flags; +}; + /* for inline stuff */ #define DEF_INLINE_RESERVED_SIZE 1 static inline int get_extra_isize(struct inode *inode); @@ -788,6 +803,7 @@ struct f2fs_inode_info { struct list_head inmem_pages; /* inmemory pages managed by f2fs */ struct task_struct *inmem_task; /* store inmemory task */ struct mutex inmem_lock; /* lock for inmemory pages */ + pgoff_t ra_offset; /* ongoing readahead offset */ struct extent_tree *extent_tree; /* cached extent_tree entry */ /* avoid racing between foreground op and gc */ @@ -1261,7 +1277,8 @@ enum { GC_NORMAL, GC_IDLE_CB, GC_IDLE_GREEDY, - GC_URGENT, + GC_URGENT_HIGH, + GC_URGENT_LOW, }; enum { @@ -1307,6 +1324,14 @@ enum fsync_mode { #define IS_DUMMY_WRITTEN_PAGE(page) \ (page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE) +#ifdef CONFIG_F2FS_IO_TRACE +#define IS_IO_TRACED_PAGE(page) \ + (page_private(page) > 0 && \ + page_private(page) < (unsigned long)PID_MAX_LIMIT) +#else +#define IS_IO_TRACED_PAGE(page) (0) +#endif + #ifdef CONFIG_FS_ENCRYPTION #define 
DUMMY_ENCRYPTION_ENABLED(sbi) \ (unlikely(F2FS_OPTION(sbi).test_dummy_encryption)) @@ -1388,6 +1413,8 @@ struct decompress_io_ctx { #define MAX_COMPRESS_LOG_SIZE 8 #define MAX_COMPRESS_WINDOW_SIZE ((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE) +#define MOUNT_NAME_SIZE 20 + struct f2fs_sb_info { struct super_block *sb; /* pointer to VFS super block */ struct proc_dir_entry *s_proc; /* proc entry */ @@ -1511,6 +1538,7 @@ struct f2fs_sb_info { unsigned int gc_mode; /* current GC state */ unsigned int next_victim_seg[2]; /* next segment in victim section */ unsigned int rapid_gc; /* is rapid GC running */ + /* for skip statistic */ unsigned int atomic_files; /* # of opened atomic file */ unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */ @@ -1568,6 +1596,8 @@ struct f2fs_sb_info { /* For sysfs suppport */ struct kobject s_kobj; struct completion s_kobj_unregister; + int s_mount_id; + char s_mount_name[MOUNT_NAME_SIZE]; /* For shrinker support */ struct list_head s_list; @@ -2454,7 +2484,7 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, static inline bool is_idle(struct f2fs_sb_info *sbi, int type) { - if (sbi->gc_mode == GC_URGENT) + if (sbi->gc_mode == GC_URGENT_HIGH) return true; if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || @@ -2472,6 +2502,10 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type) atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) return false; + if (sbi->gc_mode == GC_URGENT_LOW && + (type == DISCARD_TIME || type == GC_TIME)) + return true; + return f2fs_time_over(sbi, type); } @@ -3324,9 +3358,10 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); -void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, +void f2fs_allocate_segment_for_resize(struct f2fs_sb_info 
*sbi, int type, unsigned int start, unsigned int end); -void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type); +void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type); +void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc); @@ -3349,7 +3384,7 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, - struct f2fs_io_info *fio, bool add_list); + struct f2fs_io_info *fio); void f2fs_wait_on_page_writeback(struct page *page, enum page_type type, bool ordered, bool locked); void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); @@ -3448,7 +3483,7 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index, struct page *f2fs_get_new_data_page(struct inode *inode, struct page *ipage, pgoff_t index, bool new_i_size); int f2fs_do_write_data_page(struct f2fs_io_info *fio); -void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock); +void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock); int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int create, int flag); int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, @@ -3541,6 +3576,9 @@ struct f2fs_stat_info { int curseg[NR_CURSEG_TYPE]; int cursec[NR_CURSEG_TYPE]; int curzone[NR_CURSEG_TYPE]; + unsigned int dirty_seg[NR_CURSEG_TYPE]; + unsigned int full_seg[NR_CURSEG_TYPE]; + unsigned int valid_blks[NR_CURSEG_TYPE]; unsigned int meta_count[META_MAX]; unsigned int segment_count[2]; @@ -3800,7 +3838,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, struct rb_root_cached *root); unsigned int 
f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); -bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext); +void f2fs_init_extent_tree(struct inode *inode, struct page *ipage); void f2fs_drop_extent_tree(struct inode *inode); unsigned int f2fs_destroy_extent_node(struct inode *inode); void f2fs_destroy_extent_tree(struct inode *inode); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 9b9ab6915be6..06fd26bea82c 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "f2fs.h" #include "node.h" @@ -104,11 +105,11 @@ static int f2fs_vm_page_mkwrite(struct vm_fault *vmf) if (need_alloc) { /* block allocation */ - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_get_block(&dn, page->index); f2fs_put_dnode(&dn); - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); } #ifdef CONFIG_F2FS_FS_COMPRESSION @@ -1364,8 +1365,6 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) truncate_pagecache(inode, offset); new_size = i_size_read(inode) - len; - truncate_pagecache(inode, new_size); - ret = f2fs_truncate_blocks(inode, new_size, true); up_write(&F2FS_I(inode)->i_mmap_sem); if (!ret) @@ -1651,7 +1650,7 @@ next_alloc: map.m_seg_type = CURSEG_COLD_DATA_PINNED; f2fs_lock_op(sbi); - f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA); + f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA); f2fs_unlock_op(sbi); err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO); @@ -2464,6 +2463,11 @@ do_more: } ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start)); + if (ret) { + if (ret == -EBUSY) + ret = -EAGAIN; + goto out; + } range.start += BLKS_PER_SEC(sbi); if (range.start <= end) goto do_more; @@ -3607,6 +3611,181 @@ out: return ret; } +static int f2fs_secure_erase(struct block_device 
*bdev, block_t block, + block_t len, u32 flags) +{ + struct request_queue *q = bdev_get_queue(bdev); + sector_t sector = SECTOR_FROM_BLOCK(block); + sector_t nr_sects = SECTOR_FROM_BLOCK(len); + int ret = 0; + + if (!q) + return -ENXIO; + + if (flags & F2FS_TRIM_FILE_DISCARD) + ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, + blk_queue_secure_erase(q) ? + BLKDEV_DISCARD_SECURE : 0); + + if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) + ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS, 0); + + return ret; +} + +static int f2fs_sec_trim_file(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct address_space *mapping = inode->i_mapping; + struct block_device *prev_bdev = NULL; + struct f2fs_sectrim_range range; + pgoff_t index, pg_end; + block_t prev_block = 0, len = 0; + u64 end_addr; + bool to_end; + int ret = 0; + + if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + + if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg, + sizeof(range))) + return -EFAULT; + + if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) || + !S_ISREG(inode->i_mode)) + return -EINVAL; + + if ((range.flags & F2FS_TRIM_FILE_DISCARD) && + !f2fs_hw_support_discard(sbi)) + return -EOPNOTSUPP; + + file_start_write(filp); + inode_lock(inode); + + if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode)) { + ret = -EINVAL; + goto err; + } + + if (inode->i_size == 0) + goto err; + + end_addr = range.start + range.len; + if (end_addr > inode->i_size) { + ret = -EINVAL; + goto err; + } + + to_end = (end_addr == inode->i_size); + if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) || + (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) { + ret = -EINVAL; + goto err; + } + + index = F2FS_BYTES_TO_BLK(range.start); + pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE); + + ret = f2fs_convert_inline_inode(inode); + if (ret) + goto err; + + 
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + down_write(&F2FS_I(inode)->i_mmap_sem); + + ret = filemap_write_and_wait_range(mapping, range.start, end_addr - 1); + if (ret) + goto out; + + truncate_inode_pages_range(mapping, range.start, + to_end ? -1 : end_addr - 1); + + while (index < pg_end) { + struct dnode_of_data dn; + pgoff_t end_offset, count; + int i; + + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); + if (ret) { + if (ret == -ENOENT) { + index = f2fs_get_next_page_offset(&dn, index); + continue; + } + goto out; + } + + end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + count = min(end_offset - dn.ofs_in_node, pg_end - index); + for (i = 0; i < count; i++, dn.ofs_in_node++) { + struct block_device *cur_bdev; + block_t blkaddr = f2fs_data_blkaddr(&dn); + + if (!__is_valid_data_blkaddr(blkaddr)) + continue; + + if (!f2fs_is_valid_blkaddr(sbi, blkaddr, + DATA_GENERIC_ENHANCE)) { + ret = -EFSCORRUPTED; + f2fs_put_dnode(&dn); + goto out; + } + + cur_bdev = f2fs_target_device(sbi, blkaddr, NULL); + if (f2fs_is_multi_device(sbi)) { + int di = f2fs_target_device_index(sbi, blkaddr); + + blkaddr -= FDEV(di).start_blk; + } + + if (len) { + if (prev_bdev == cur_bdev && + blkaddr == prev_block + len) { + len++; + } else { + ret = f2fs_secure_erase(prev_bdev, + prev_block, len, range.flags); + if (ret) { + f2fs_put_dnode(&dn); + goto out; + } + + len = 0; + } + } + + if (!len) { + prev_bdev = cur_bdev; + prev_block = blkaddr; + len = 1; + } + } + + f2fs_put_dnode(&dn); + index += count; + + if (fatal_signal_pending(current)) { + ret = -EINTR; + goto out; + } + cond_resched(); + } + + if (len) + ret = f2fs_secure_erase(prev_bdev, prev_block, len, + range.flags); +out: + up_write(&F2FS_I(inode)->i_mmap_sem); + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); +err: + inode_unlock(inode); + file_end_write(filp); + + return ret; +} + long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if 
(unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) @@ -3673,6 +3852,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_release_compress_blocks(filp, arg); case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: return f2fs_reserve_compress_blocks(filp, arg); + case F2FS_IOC_SEC_TRIM_FILE: + return f2fs_sec_trim_file(filp, arg); default: return -ENOTTY; } @@ -3831,6 +4012,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case F2FS_IOC_GET_COMPRESS_BLOCKS: case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: + case F2FS_IOC_SEC_TRIM_FILE: break; default: return -ENOIOCTLCMD; diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 9a7e9536a653..8db9df42c6ba 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -51,6 +51,9 @@ static inline void rapid_gc_set_wakelock(void) mutex_unlock(&gc_wakelock_mutex); } +static unsigned int count_bits(const unsigned long *addr, + unsigned int offset, unsigned int len); + static int gc_thread_func(void *data) { struct f2fs_sb_info *sbi = data; @@ -72,7 +75,7 @@ static int gc_thread_func(void *data) rapid_gc_set_wakelock(); // Use 1 instead of 0 to allow thread interrupts wait_ms = 1; - sbi->gc_mode = GC_URGENT; + sbi->gc_mode = GC_URGENT_HIGH; } else { rapid_gc_set_wakelock(); wait_ms = gc_th->min_sleep_time; @@ -121,7 +124,7 @@ static int gc_thread_func(void *data) * invalidated soon after by user update or deletion. * So, I'd like to wait some time to collect dirty segments. 
*/ - if (sbi->gc_mode == GC_URGENT || sbi->rapid_gc) { + if (sbi->gc_mode == GC_URGENT_HIGH || sbi->rapid_gc) { if (!sbi->rapid_gc) wait_ms = gc_th->urgent_sleep_time; down_write(&sbi->gc_lock); @@ -363,7 +366,7 @@ static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type) gc_mode = GC_CB; break; case GC_IDLE_GREEDY: - case GC_URGENT: + case GC_URGENT_HIGH: gc_mode = GC_GREEDY; break; } @@ -377,14 +380,20 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, if (p->alloc_mode == SSR) { p->gc_mode = GC_GREEDY; - p->dirty_segmap = dirty_i->dirty_segmap[type]; + p->dirty_bitmap = dirty_i->dirty_segmap[type]; p->max_search = dirty_i->nr_dirty[type]; p->ofs_unit = 1; } else { p->gc_mode = select_gc_type(sbi, gc_type); - p->dirty_segmap = dirty_i->dirty_segmap[DIRTY]; - p->max_search = dirty_i->nr_dirty[DIRTY]; p->ofs_unit = sbi->segs_per_sec; + if (__is_large_section(sbi)) { + p->dirty_bitmap = dirty_i->dirty_secmap; + p->max_search = count_bits(p->dirty_bitmap, + 0, MAIN_SECS(sbi)); + } else { + p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY]; + p->max_search = dirty_i->nr_dirty[DIRTY]; + } } /* @@ -392,7 +401,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, * foreground GC and urgent GC cases. 
*/ if (gc_type != FG_GC && - (sbi->gc_mode != GC_URGENT) && + (sbi->gc_mode != GC_URGENT_HIGH) && p->max_search > sbi->max_victim_search) p->max_search = sbi->max_victim_search; @@ -511,6 +520,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, unsigned int secno, last_victim; unsigned int last_segment; unsigned int nsearched = 0; + int ret = 0; mutex_lock(&dirty_i->seglist_lock); last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec; @@ -522,12 +532,19 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, p.min_cost = get_max_cost(sbi, &p); if (*result != NULL_SEGNO) { - if (get_valid_blocks(sbi, *result, false) && - !sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) + if (!get_valid_blocks(sbi, *result, false)) { + ret = -ENODATA; + goto out; + } + + if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) + ret = -EBUSY; + else p.min_segno = *result; goto out; } + ret = -ENODATA; if (p.max_search == 0) goto out; @@ -555,10 +572,14 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, } while (1) { - unsigned long cost; - unsigned int segno; + unsigned long cost, *dirty_bitmap; + unsigned int unit_no, segno; - segno = find_next_bit(p.dirty_segmap, last_segment, p.offset); + dirty_bitmap = p.dirty_bitmap; + unit_no = find_next_bit(dirty_bitmap, + last_segment / p.ofs_unit, + p.offset / p.ofs_unit); + segno = unit_no * p.ofs_unit; if (segno >= last_segment) { if (sm->last_victim[p.gc_mode]) { last_segment = @@ -571,14 +592,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, } p.offset = segno + p.ofs_unit; - if (p.ofs_unit > 1) { - p.offset -= segno % p.ofs_unit; - nsearched += count_bits(p.dirty_segmap, - p.offset - p.ofs_unit, - p.ofs_unit); - } else { - nsearched++; - } + nsearched++; #ifdef CONFIG_F2FS_CHECK_FS /* @@ -611,9 +625,10 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, next: if (nsearched >= p.max_search) { if (!sm->last_victim[p.gc_mode] && segno <= last_victim) - sm->last_victim[p.gc_mode] = 
last_victim + 1; + sm->last_victim[p.gc_mode] = + last_victim + p.ofs_unit; else - sm->last_victim[p.gc_mode] = segno + 1; + sm->last_victim[p.gc_mode] = segno + p.ofs_unit; sm->last_victim[p.gc_mode] %= (MAIN_SECS(sbi) * sbi->segs_per_sec); break; @@ -630,6 +645,7 @@ got_result: else set_bit(secno, dirty_i->victim_secmap); } + ret = 0; } out: @@ -639,7 +655,7 @@ out: prefree_segments(sbi), free_segments(sbi)); mutex_unlock(&dirty_i->seglist_lock); - return (p.min_segno == NULL_SEGNO) ? 0 : 1; + return ret; } static const struct victim_selection default_v_ops = { @@ -1023,8 +1039,10 @@ static int move_data_block(struct inode *inode, block_t bidx, mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), fio.old_blkaddr, false); - if (!mpage) + if (!mpage) { + err = -ENOMEM; goto up_out; + } fio.encrypted_page = mpage; @@ -1049,7 +1067,7 @@ static int move_data_block(struct inode *inode, block_t bidx, } f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, - &sum, CURSEG_COLD_DATA, NULL, false); + &sum, CURSEG_COLD_DATA, NULL); fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); @@ -1524,10 +1542,9 @@ gc_more: ret = -EINVAL; goto stop; } - if (!__get_victim(sbi, &segno, gc_type)) { - ret = -ENODATA; + ret = __get_victim(sbi, &segno, gc_type); + if (ret) goto stop; - } seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type); if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec) @@ -1632,7 +1649,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi, /* Move out cursegs from the target range */ for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++) - allocate_segment_for_resize(sbi, type, start, end); + f2fs_allocate_segment_for_resize(sbi, type, start, end); /* do GC to move out valid blocks in the range */ for (segno = start; segno <= end; segno += sbi->segs_per_sec) { diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 6e497598c069..652b972dfd7b 100644 --- a/fs/f2fs/inline.c +++ 
b/fs/f2fs/inline.c @@ -11,6 +11,7 @@ #include "f2fs.h" #include "node.h" +#include bool f2fs_may_inline_data(struct inode *inode) { @@ -775,6 +776,7 @@ int f2fs_inline_data_fiemap(struct inode *inode, byteaddr += (char *)inline_data_addr(inode, ipage) - (char *)F2FS_INODE(ipage); err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags); + trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err); out: f2fs_put_page(ipage, 1); return err; diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index f7cae0dc9502..8d2d8726d65f 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -365,8 +365,7 @@ static int do_read_inode(struct inode *inode) fi->i_pino = le32_to_cpu(ri->i_pino); fi->i_dir_level = ri->i_dir_level; - if (f2fs_init_extent_tree(inode, &ri->i_ext)) - set_page_dirty(node_page); + f2fs_init_extent_tree(inode, node_page); get_inline_info(inode, ri); @@ -400,6 +399,7 @@ static int do_read_inode(struct inode *inode) /* try to recover cold bit for non-dir inode */ if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) { + f2fs_wait_on_page_writeback(node_page, NODE, true, true); set_cold_node(node_page, false); set_page_dirty(node_page); } diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 733e75dd7481..897fd608e462 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -569,15 +569,17 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) trace_f2fs_unlink_enter(dir, dentry); - if (unlikely(f2fs_cp_error(sbi))) - return -EIO; + if (unlikely(f2fs_cp_error(sbi))) { + err = -EIO; + goto fail; + } err = dquot_initialize(dir); if (err) - return err; + goto fail; err = dquot_initialize(inode); if (err) - return err; + goto fail; de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) { diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index d0f5f3de714e..4e6486bf8360 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1039,8 +1039,10 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from) 
trace_f2fs_truncate_inode_blocks_enter(inode, from); level = get_node_path(inode, from, offset, noffset); - if (level < 0) + if (level < 0) { + trace_f2fs_truncate_inode_blocks_exit(inode, level); return level; + } page = f2fs_get_node_page(sbi, inode->i_ino); if (IS_ERR(page)) { @@ -2108,7 +2110,7 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, } static int __insert_free_nid(struct f2fs_sb_info *sbi, - struct free_nid *i, enum nid_state state) + struct free_nid *i) { struct f2fs_nm_info *nm_i = NM_I(sbi); @@ -2116,10 +2118,8 @@ static int __insert_free_nid(struct f2fs_sb_info *sbi, if (err) return err; - f2fs_bug_on(sbi, state != i->state); - nm_i->nid_cnt[state]++; - if (state == FREE_NID) - list_add_tail(&i->list, &nm_i->free_nid_list); + nm_i->nid_cnt[FREE_NID]++; + list_add_tail(&i->list, &nm_i->free_nid_list); return 0; } @@ -2241,7 +2241,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, } } ret = true; - err = __insert_free_nid(sbi, i, FREE_NID); + err = __insert_free_nid(sbi, i); err_out: if (update) { update_free_nid_bitmap(sbi, nid, ret, build); diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 7cb7ec838a6f..0b837856098a 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -742,7 +742,7 @@ next: f2fs_put_page(page, 1); } if (!err) - f2fs_allocate_new_segments(sbi, NO_CHECK_TYPE); + f2fs_allocate_new_segments(sbi); return err; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 8c071ae6b08f..91f39b7f78b0 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -174,7 +174,7 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi) if (f2fs_lfs_mode(sbi)) return false; - if (sbi->gc_mode == GC_URGENT) + if (sbi->gc_mode == GC_URGENT_HIGH) return true; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) return true; @@ -796,6 +796,18 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, } if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) dirty_i->nr_dirty[t]++; + + if 
(__is_large_section(sbi)) { + unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); + unsigned short valid_blocks = + get_valid_blocks(sbi, segno, true); + + f2fs_bug_on(sbi, unlikely(!valid_blocks || + valid_blocks == BLKS_PER_SEC(sbi))); + + if (!IS_CURSEC(sbi, secno)) + set_bit(secno, dirty_i->dirty_secmap); + } } } @@ -803,6 +815,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + unsigned short valid_blocks; if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) dirty_i->nr_dirty[dirty_type]--; @@ -814,13 +827,26 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) dirty_i->nr_dirty[t]--; - if (get_valid_blocks(sbi, segno, true) == 0) { + valid_blocks = get_valid_blocks(sbi, segno, true); + if (valid_blocks == 0) { clear_bit(GET_SEC_FROM_SEG(sbi, segno), dirty_i->victim_secmap); #ifdef CONFIG_F2FS_CHECK_FS clear_bit(segno, SIT_I(sbi)->invalid_segmap); #endif } + if (__is_large_section(sbi)) { + unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); + + if (!valid_blocks || + valid_blocks == BLKS_PER_SEC(sbi)) { + clear_bit(secno, dirty_i->dirty_secmap); + return; + } + + if (!IS_CURSEC(sbi, secno)) + set_bit(secno, dirty_i->dirty_secmap); + } } } @@ -1713,7 +1739,7 @@ static int issue_discard_thread(void *data) wait_event_interruptible_timeout(*q, kthread_should_stop() || freezing(current) || dcc->discard_wake, - msecs_to_jiffies((sbi->gc_mode == GC_URGENT) ? + msecs_to_jiffies((sbi->gc_mode == GC_URGENT_HIGH) ? 
1 : wait_ms)); if (dcc->discard_wake) @@ -1734,7 +1760,7 @@ static int issue_discard_thread(void *data) continue; } - if (sbi->gc_mode == GC_URGENT) + if (sbi->gc_mode == GC_URGENT_HIGH) __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1); sb_start_intwrite(sbi->sb); @@ -2605,7 +2631,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) bool reversed = false; /* f2fs_need_SSR() already forces to do this */ - if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) { + if (!v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) { curseg->next_segno = segno; return 1; } @@ -2632,7 +2658,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) for (; cnt-- > 0; reversed ? i-- : i++) { if (i == type) continue; - if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) { + if (!v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) { curseg->next_segno = segno; return 1; } @@ -2674,7 +2700,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, stat_inc_seg_type(sbi, curseg); } -void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, +void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, unsigned int start, unsigned int end) { struct curseg_info *curseg = CURSEG_I(sbi, type); @@ -2707,28 +2733,35 @@ unlock: up_read(&SM_I(sbi)->curseg_lock); } -void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type) +static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type) { - struct curseg_info *curseg; + struct curseg_info *curseg = CURSEG_I(sbi, type); unsigned int old_segno; + + if (!curseg->next_blkoff && + !get_valid_blocks(sbi, curseg->segno, false) && + !get_ckpt_valid_blocks(sbi, curseg->segno)) + return; + + old_segno = curseg->segno; + SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true); + locate_dirty_segment(sbi, old_segno); +} + +void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type) +{ + down_write(&SIT_I(sbi)->sentry_lock); + __allocate_new_segment(sbi, type); + 
up_write(&SIT_I(sbi)->sentry_lock); +} + +void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) +{ int i; down_write(&SIT_I(sbi)->sentry_lock); - - for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { - if (type != NO_CHECK_TYPE && i != type) - continue; - - curseg = CURSEG_I(sbi, i); - if (type == NO_CHECK_TYPE || curseg->next_blkoff || - get_valid_blocks(sbi, curseg->segno, false) || - get_ckpt_valid_blocks(sbi, curseg->segno)) { - old_segno = curseg->segno; - SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); - locate_dirty_segment(sbi, old_segno); - } - } - + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) + __allocate_new_segment(sbi, i); up_write(&SIT_I(sbi)->sentry_lock); } @@ -3089,7 +3122,7 @@ static int __get_segment_type(struct f2fs_io_info *fio) void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, - struct f2fs_io_info *fio, bool add_list) + struct f2fs_io_info *fio) { struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, type); @@ -3107,14 +3140,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, type = CURSEG_COLD_DATA; } - /* - * We need to wait for node_write to avoid block allocation during - * checkpoint. This can only happen to quota writes which can cause - * the below discard race condition. 
- */ - if (IS_DATASEG(type)) - down_write(&sbi->node_write); - down_read(&SM_I(sbi)->curseg_lock); mutex_lock(&curseg->curseg_mutex); @@ -3165,7 +3190,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, if (F2FS_IO_ALIGNED(sbi)) fio->retry = false; - if (add_list) { + if (fio) { struct f2fs_bio_info *io; INIT_LIST_HEAD(&fio->list); @@ -3180,9 +3205,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, up_read(&SM_I(sbi)->curseg_lock); - if (IS_DATASEG(type)) - up_write(&sbi->node_write); - if (put_pin_sem) up_read(&sbi->pin_sem); } @@ -3217,7 +3239,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) down_read(&fio->sbi->io_order_lock); reallocate: f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, - &fio->new_blkaddr, sum, type, fio, true); + &fio->new_blkaddr, sum, type, fio); if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) invalidate_mapping_pages(META_MAPPING(fio->sbi), fio->old_blkaddr, fio->old_blkaddr); @@ -4293,8 +4315,9 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct free_segmap_info *free_i = FREE_I(sbi); - unsigned int segno = 0, offset = 0; + unsigned int segno = 0, offset = 0, secno; unsigned short valid_blocks; + unsigned short blks_per_sec = BLKS_PER_SEC(sbi); while (1) { /* find dirty segment based on free segmap */ @@ -4313,6 +4336,22 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi) __locate_dirty_segment(sbi, segno, DIRTY); mutex_unlock(&dirty_i->seglist_lock); } + + if (!__is_large_section(sbi)) + return; + + mutex_lock(&dirty_i->seglist_lock); + for (segno = 0; segno < MAIN_SECS(sbi); segno += blks_per_sec) { + valid_blocks = get_valid_blocks(sbi, segno, true); + secno = GET_SEC_FROM_SEG(sbi, segno); + + if (!valid_blocks || valid_blocks == blks_per_sec) + continue; + if (IS_CURSEC(sbi, secno)) + continue; + set_bit(secno, dirty_i->dirty_secmap); + } + 
mutex_unlock(&dirty_i->seglist_lock); } static int init_victim_secmap(struct f2fs_sb_info *sbi) @@ -4349,6 +4388,14 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi) return -ENOMEM; } + if (__is_large_section(sbi)) { + bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); + dirty_i->dirty_secmap = f2fs_kvzalloc(sbi, + bitmap_size, GFP_KERNEL); + if (!dirty_i->dirty_secmap) + return -ENOMEM; + } + init_dirty_segmap(sbi); return init_victim_secmap(sbi); } @@ -4518,6 +4565,12 @@ static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) for (i = 0; i < NR_DIRTY_TYPE; i++) discard_dirty_segmap(sbi, i); + if (__is_large_section(sbi)) { + mutex_lock(&dirty_i->seglist_lock); + kvfree(dirty_i->dirty_secmap); + mutex_unlock(&dirty_i->seglist_lock); + } + destroy_victim_secmap(sbi); SM_I(sbi)->dirty_info = NULL; kvfree(dirty_i); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index d870da8468f0..6ed65e193e3f 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -166,8 +166,11 @@ enum { struct victim_sel_policy { int alloc_mode; /* LFS or SSR */ int gc_mode; /* GC_CB or GC_GREEDY */ - unsigned long *dirty_segmap; /* dirty segment bitmap */ - unsigned int max_search; /* maximum # of segments to search */ + unsigned long *dirty_bitmap; /* dirty segment/section bitmap */ + unsigned int max_search; /* + * maximum # of segments/sections + * to search + */ unsigned int offset; /* last scanned bitmap offset */ unsigned int ofs_unit; /* bitmap search unit */ unsigned int min_cost; /* minimum cost */ @@ -266,6 +269,7 @@ enum dirty_type { struct dirty_seglist_info { const struct victim_selection *v_ops; /* victim selction operation */ unsigned long *dirty_segmap[NR_DIRTY_TYPE]; + unsigned long *dirty_secmap; struct mutex seglist_lock; /* lock for segment bitmaps */ int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */ unsigned long *victim_secmap; /* background GC victims */ diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 18b6713c5a9d..32e0d8d109bc 100644 --- 
a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -346,7 +346,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype, set_opt(sbi, QUOTA); return 0; errout: - kvfree(qname); + kfree(qname); return ret; } @@ -358,7 +358,7 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype) f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); return -EINVAL; } - kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]); + kfree(F2FS_OPTION(sbi).s_qf_names[qtype]); F2FS_OPTION(sbi).s_qf_names[qtype] = NULL; return 0; } @@ -449,10 +449,10 @@ static int parse_options(struct super_block *sb, char *options) } else if (!strcmp(name, "sync")) { F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); break; case Opt_disable_roll_forward: set_opt(sbi, DISABLE_ROLL_FORWARD); @@ -609,17 +609,17 @@ static int parse_options(struct super_block *sb, char *options) if (!strcmp(name, "adaptive")) { if (f2fs_sb_has_blkzoned(sbi)) { f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature"); - kvfree(name); + kfree(name); return -EINVAL; } F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; } else if (!strcmp(name, "lfs")) { F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); break; case Opt_io_size_bits: if (args->from && match_int(args, &arg)) @@ -745,10 +745,10 @@ static int parse_options(struct super_block *sb, char *options) } else if (!strcmp(name, "fs-based")) { F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); break; case Opt_alloc: name = match_strdup(&args[0]); @@ -760,10 +760,10 @@ static int parse_options(struct super_block *sb, char *options) } else if (!strcmp(name, "reuse")) { F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); 
break; case Opt_fsync: name = match_strdup(&args[0]); @@ -777,10 +777,10 @@ static int parse_options(struct super_block *sb, char *options) F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_NOBARRIER; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); break; case Opt_test_dummy_encryption: #ifdef CONFIG_FS_ENCRYPTION @@ -977,6 +977,8 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) /* Will be used by directory only */ fi->i_dir_level = F2FS_SB(sb)->dir_level; + fi->ra_offset = -1; + return &fi->vfs_inode; } @@ -1230,14 +1232,14 @@ static void f2fs_put_super(struct super_block *sb) sb->s_fs_info = NULL; if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); - kvfree(sbi->raw_super); + kfree(sbi->raw_super); destroy_device_list(sbi); f2fs_destroy_xattr_caches(sbi); mempool_destroy(sbi->write_io_dummy); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) - kvfree(F2FS_OPTION(sbi).s_qf_names[i]); + kfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif destroy_percpu_info(sbi); for (i = 0; i < NR_PAGE_TYPE; i++) @@ -1245,7 +1247,7 @@ static void f2fs_put_super(struct super_block *sb) #ifdef CONFIG_UNICODE utf8_unload(sbi->s_encoding); #endif - kvfree(sbi); + kfree(sbi); } int f2fs_sync_fs(struct super_block *sb, int sync) @@ -1743,7 +1745,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) GFP_KERNEL); if (!org_mount_opt.s_qf_names[i]) { for (j = 0; j < i; j++) - kvfree(org_mount_opt.s_qf_names[j]); + kfree(org_mount_opt.s_qf_names[j]); return -ENOMEM; } } else { @@ -1868,7 +1870,7 @@ skip: #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < MAXQUOTAS; i++) - kvfree(org_mount_opt.s_qf_names[i]); + kfree(org_mount_opt.s_qf_names[i]); #endif /* Update the POSIXACL Flag */ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | @@ -1889,7 +1891,7 @@ restore_opts: #ifdef CONFIG_QUOTA F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { - 
kvfree(F2FS_OPTION(sbi).s_qf_names[i]); + kfree(F2FS_OPTION(sbi).s_qf_names[i]); F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i]; } #endif @@ -3133,7 +3135,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi, /* No valid superblock */ if (!*raw_super) - kvfree(super); + kfree(super); else err = 0; @@ -3795,15 +3797,15 @@ free_bio_info: free_options: #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) - kvfree(F2FS_OPTION(sbi).s_qf_names[i]); + kfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif kvfree(options); free_sb_buf: - kvfree(raw_super); + kfree(raw_super); free_sbi: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); - kvfree(sbi); + kfree(sbi); /* give only one another chance */ if (retry_cnt > 0 && skip_recovery) { diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 45652a789adc..30b203bca337 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -18,6 +18,7 @@ #include static struct proc_dir_entry *f2fs_proc_root; +static struct ida f2fs_mount_ida; /* Sysfs support for f2fs */ enum { @@ -862,6 +863,9 @@ int __init f2fs_init_sysfs(void) kset_unregister(&f2fs_kset); } else f2fs_proc_root = proc_mkdir("fs/f2fs_dev", NULL); + + ida_init(&f2fs_mount_ida); + return ret; } @@ -871,6 +875,7 @@ void f2fs_exit_sysfs(void) kset_unregister(&f2fs_kset); remove_proc_entry("fs/f2fs_dev", NULL); f2fs_proc_root = NULL; + ida_destroy(&f2fs_mount_ida); } int f2fs_register_sysfs(struct f2fs_sb_info *sbi) @@ -882,12 +887,22 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi) init_completion(&sbi->s_kobj_unregister); err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL, "%s", sb->s_id); - if (err) { - kobject_put(&sbi->s_kobj); - wait_for_completion(&sbi->s_kobj_unregister); - return err; + if (err) + goto err1; + + sbi->s_mount_id = ida_simple_get(&f2fs_mount_ida, 0, 0, GFP_KERNEL); + if (sbi->s_mount_id < 0) { + err = sbi->s_mount_id; + goto err1; } + snprintf(sbi->s_mount_name, MOUNT_NAME_SIZE, "mount_%d", + sbi->s_mount_id); + err 
= sysfs_create_link(&f2fs_kset.kobj, &sbi->s_kobj, + sbi->s_mount_name); + if (err) + goto err2; + if (f2fs_proc_root) sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root); @@ -902,6 +917,12 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi) &f2fs_seq_victim_bits_fops, sb); } return 0; +err2: + ida_simple_remove(&f2fs_mount_ida, sbi->s_mount_id); +err1: + kobject_put(&sbi->s_kobj); + wait_for_completion(&sbi->s_kobj_unregister); + return err; } void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi) @@ -913,6 +934,8 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi) remove_proc_entry("victim_bits", sbi->s_proc); remove_proc_entry(sbi->sb->s_id, f2fs_proc_root); } + sysfs_remove_link(&f2fs_kset.kobj, sbi->s_mount_name); + ida_simple_remove(&f2fs_mount_ida, sbi->s_mount_id); kobject_del(&sbi->s_kobj); kobject_put(&sbi->s_kobj); } diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index e6ea4511c41c..3cef33b0f7e0 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -396,8 +396,6 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq) { struct inode *inode = dreq->inode; - inode_dio_end(inode); - if (dreq->iocb) { long res = (long) dreq->error; if (dreq->count != 0) { @@ -409,7 +407,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq) complete(&dreq->completion); + igrab(inode); nfs_direct_req_release(dreq); + inode_dio_end(inode); + iput(inode); } static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) @@ -539,8 +540,10 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * generic layer handle the completion. */ if (requested_bytes == 0) { - inode_dio_end(inode); + igrab(inode); nfs_direct_req_release(dreq); + inode_dio_end(inode); + iput(inode); return result < 0 ? result : -EIO; } @@ -957,8 +960,10 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * generic layer handle the completion. 
*/ if (requested_bytes == 0) { - inode_dio_end(inode); + igrab(inode); nfs_direct_req_release(dreq); + inode_dio_end(inode); + iput(inode); return result < 0 ? result : -EIO; } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 81cca49a8375..7da2cea1e7a0 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -82,6 +82,7 @@ nfs_file_release(struct inode *inode, struct file *filp) dprintk("NFS: release(%pD2)\n", filp); nfs_inc_stats(inode, NFSIOS_VFSRELEASE); + inode_dio_wait(inode); nfs_file_clear_open_context(filp); return 0; } diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 8dbde5ded042..74f15498c9bf 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -921,9 +921,8 @@ retry: goto out_mds; /* Use a direct mapping of ds_idx to pgio mirror_idx */ - if (WARN_ON_ONCE(pgio->pg_mirror_count != - FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))) - goto out_mds; + if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)) + goto out_eagain; for (i = 0; i < pgio->pg_mirror_count; i++) { ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true); @@ -942,11 +941,15 @@ retry: } return; - +out_eagain: + pnfs_generic_pg_cleanup(pgio); + pgio->pg_error = -EAGAIN; + return; out_mds: pnfs_put_lseg(pgio->pg_lseg); pgio->pg_lseg = NULL; nfs_pageio_reset_write_mds(pgio); + pgio->pg_error = -EAGAIN; } static unsigned int diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 5bb4a89f9045..0773c774e2bf 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -303,7 +303,7 @@ #define OCFS2_MAX_SLOTS 255 /* Slot map indicator for an empty slot */ -#define OCFS2_INVALID_SLOT -1 +#define OCFS2_INVALID_SLOT ((u16)-1) #define OCFS2_VOL_UUID_LEN 16 #define OCFS2_MAX_VOL_LABEL_LEN 64 @@ -339,8 +339,8 @@ struct ocfs2_system_inode_info { enum { BAD_BLOCK_SYSTEM_INODE = 0, GLOBAL_INODE_ALLOC_SYSTEM_INODE, +#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE, 
-#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE HEARTBEAT_SYSTEM_INODE, GLOBAL_BITMAP_SYSTEM_INODE, USER_QUOTA_SYSTEM_INODE, diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 71f22c8fbffd..4ca2f71565f9 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -2891,9 +2891,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res) goto bail; } - inode_alloc_inode = - ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE, - suballoc_slot); + if (suballoc_slot == (u16)OCFS2_INVALID_SLOT) + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot); + else + inode_alloc_inode = ocfs2_get_system_file_inode(osb, + INODE_ALLOC_SYSTEM_INODE, suballoc_slot); if (!inode_alloc_inode) { /* the error code could be inaccurate, but we are not able to * get the correct one. */ diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 516e0c57cf9c..a10d9a3c181e 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -2529,6 +2529,13 @@ xfs_agf_verify( be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp))) return false; + if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks) + return false; + + if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) || + be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length)) + return false; + if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 || be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 || be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS || @@ -2540,6 +2547,10 @@ xfs_agf_verify( be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)) return false; + if (xfs_sb_version_hasrmapbt(&mp->m_sb) && + be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length)) + return false; + /* * during growfs operations, the perag is not fully initialised, * so we can't use it for any useful checking. 
growfs ensures we can't @@ -2553,6 +2564,11 @@ xfs_agf_verify( be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length)) return false; + if (xfs_sb_version_hasreflink(&mp->m_sb) && + be32_to_cpu(agf->agf_refcount_blocks) > + be32_to_cpu(agf->agf_length)) + return false; + if (xfs_sb_version_hasreflink(&mp->m_sb) && (be32_to_cpu(agf->agf_refcount_level) < 1 || be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)) diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h index e9dabab33a48..b0d821e04424 100644 --- a/include/linux/dma-mapping-fast.h +++ b/include/linux/dma-mapping-fast.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,6 +17,7 @@ #include struct dma_iommu_mapping; +struct io_pgtable_ops; struct dma_fast_smmu_mapping { struct device *dev; @@ -35,12 +36,10 @@ struct dma_fast_smmu_mapping { bool have_stale_tlbs; dma_addr_t pgtbl_dma_handle; - av8l_fast_iopte *pgtbl_pmds; + struct io_pgtable_ops *pgtbl_ops; spinlock_t lock; struct notifier_block notifier; - - int is_smmu_pt_coherent; }; #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST diff --git a/include/linux/io-pgtable-fast.h b/include/linux/io-pgtable-fast.h index 78b069369eb7..1d5e993c9d22 100644 --- a/include/linux/io-pgtable-fast.h +++ b/include/linux/io-pgtable-fast.h @@ -15,13 +15,52 @@ #include +/* + * This ought to be private to io-pgtable-fast, but dma-mapping-fast + * currently requires it for a debug usecase. 
+ */ typedef u64 av8l_fast_iopte; -#define iopte_pmd_offset(pmds, iova) (pmds + (iova >> 12)) +struct io_pgtable_ops; + +#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST + +int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); + +void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova, + size_t size); + +bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops, + unsigned long iova); + +phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, + unsigned long iova); +#else +static inline int +av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + return -EINVAL; +} +static inline void av8l_fast_unmap_public(struct io_pgtable_ops *ops, + unsigned long iova, size_t size) +{ +} + +static inline bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops, + unsigned long iova) +{ + return false; +} +static inline phys_addr_t +av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops, + unsigned long iova) +{ + return 0; +} +#endif /* CONFIG_IOMMU_IO_PGTABLE_FAST */ -int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size, - int prot); -void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size); /* events for notifiers passed to av8l_register_notify */ #define MAPPED_OVER_STALE_TLB 1 @@ -36,14 +75,18 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size); */ #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa -void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, bool skip_sync); +void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, u64 base, + u64 start, u64 end, bool skip_sync); void av8l_register_notify(struct notifier_block *nb); #else /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */ #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0 -static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, +static inline void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, + u64 
base, + u64 start, + u64 end, bool skip_sync) { } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 3e4f4fedda98..2c9fffc697fb 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -74,7 +74,7 @@ struct iommu_domain_geometry { }; struct iommu_pgtbl_info { - void *pmds; + void *ops; }; /* Domain feature flags */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a62f40f633a8..789e84ee019f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2483,7 +2483,7 @@ void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); DECLARE_PER_CPU(int, xmit_recursion); -#define XMIT_RECURSION_LIMIT 10 +#define XMIT_RECURSION_LIMIT 8 static inline int dev_recursion_level(void) { diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 733fad7dfbed..6d15040c642c 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -207,28 +207,34 @@ static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u32 prod = p_chain->u.chain16.prod_idx; + u32 cons = p_chain->u.chain16.cons_idx; u16 used; - used = (u16) (((u32)0x10000 + - (u32)p_chain->u.chain16.prod_idx) - - (u32)p_chain->u.chain16.cons_idx); + if (prod < cons) + prod += (u32)U16_MAX + 1; + + used = (u16)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - - p_chain->u.chain16.cons_idx / p_chain->elem_per_page; + used -= prod / elem_per_page - cons / elem_per_page; return (u16)(p_chain->capacity - used); } static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) { + u16 elem_per_page = p_chain->elem_per_page; + u64 prod = p_chain->u.chain32.prod_idx; + u64 cons = p_chain->u.chain32.cons_idx; u32 used; - used = (u32) (((u64)0x100000000ULL + - (u64)p_chain->u.chain32.prod_idx) - - 
(u64)p_chain->u.chain32.cons_idx); + if (prod < cons) + prod += (u64)U32_MAX + 1; + + used = (u32)(prod - cons); if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - - p_chain->u.chain32.cons_idx / p_chain->elem_per_page; + used -= (u32)(prod / elem_per_page - cons / elem_per_page); return p_chain->capacity - used; } diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index a16e0bdf7751..d19bfdcf7749 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -107,16 +107,17 @@ retry: if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + struct skb_shared_info *shinfo = skb_shinfo(skb); - if (skb->len - p_off <= gso_size) - return -EINVAL; + /* Too small packets are not really GSO ones. */ + if (skb->len - p_off > gso_size) { + shinfo->gso_size = gso_size; + shinfo->gso_type = gso_type; - skb_shinfo(skb)->gso_size = gso_size; - skb_shinfo(skb)->gso_type = gso_type; - - /* Header must be checked, and gso_segs computed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; + /* Header must be checked, and gso_segs computed. */ + shinfo->gso_type |= SKB_GSO_DODGY; + shinfo->gso_segs = 0; + } } return 0; diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index deaafa9b09cb..d4da07048aa3 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -361,11 +361,13 @@ enum { ipv4_is_anycast_6to4(a)) /* Flags used for the bind address copy functions. 
*/ -#define SCTP_ADDR6_ALLOWED 0x00000001 /* IPv6 address is allowed by +#define SCTP_ADDR4_ALLOWED 0x00000001 /* IPv4 address is allowed by local sock family */ -#define SCTP_ADDR4_PEERSUPP 0x00000002 /* IPv4 address is supported by +#define SCTP_ADDR6_ALLOWED 0x00000002 /* IPv6 address is allowed by + local sock family */ +#define SCTP_ADDR4_PEERSUPP 0x00000004 /* IPv4 address is supported by peer */ -#define SCTP_ADDR6_PEERSUPP 0x00000004 /* IPv6 address is supported by +#define SCTP_ADDR6_PEERSUPP 0x00000008 /* IPv6 address is supported by peer */ /* Reasons to retransmit. */ diff --git a/include/net/sock.h b/include/net/sock.h index 04cf808ce7d5..366fe76a1a30 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1705,7 +1705,6 @@ static inline int sk_tx_queue_get(const struct sock *sk) static inline void sk_set_socket(struct sock *sk, struct socket *sock) { - sk_tx_queue_clear(sk); sk->sk_socket = sock; } diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 432072728bde..6e6cf4ef9c81 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -1901,6 +1901,69 @@ TRACE_EVENT(f2fs_iostat, __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio) ); +TRACE_EVENT(f2fs_bmap, + + TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock), + + TP_ARGS(inode, lblock, pblock), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(sector_t, lblock) + __field(sector_t, pblock) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblock = lblock; + __entry->pblock = pblock; + ), + + TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld", + show_dev_ino(__entry), + (unsigned long long)__entry->lblock, + (unsigned long long)__entry->pblock) +); + +TRACE_EVENT(f2fs_fiemap, + + TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock, + unsigned long long len, unsigned int flags, int ret), + + TP_ARGS(inode, lblock, pblock, len, flags, 
ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(sector_t, lblock) + __field(sector_t, pblock) + __field(unsigned long long, len) + __field(unsigned int, flags) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->lblock = lblock; + __entry->pblock = pblock; + __entry->len = len; + __entry->flags = flags; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld, " + "len:%llu, flags:%u, ret:%d", + show_dev_ino(__entry), + (unsigned long long)__entry->lblock, + (unsigned long long)__entry->pblock, + __entry->len, + __entry->flags, + __entry->ret) +); + #endif /* _TRACE_F2FS_H */ /* This part must be outside protection */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 67ac18c06f1b..738f7aa5393f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3945,7 +3945,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) */ if (dl_prio(prio)) { if (!dl_prio(p->normal_prio) || - (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { + (pi_task && dl_prio(pi_task->prio) && + dl_entity_preempt(&pi_task->dl, &p->dl))) { p->dl.dl_boosted = 1; queue_flag |= ENQUEUE_REPLENISH; } else diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 304a164f5e7e..9a55c5bc5243 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -15,6 +15,9 @@ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -504,6 +507,16 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, */ strreplace(buts->name, '/', '_'); + /* + * bdev can be NULL, as with scsi-generic, this is a helpful as + * we can be. 
+ */ + if (q->blk_trace) { + pr_warn("Concurrent blktraces are not allowed on %s\n", + buts->name); + return -EBUSY; + } + bt = kzalloc(sizeof(*bt), GFP_KERNEL); if (!bt) return -ENOMEM; diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 6fb5eb7b57dc..13f501337308 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -223,11 +223,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file) static int trigger_process_regex(struct trace_event_file *file, char *buff) { - char *command, *next = buff; + char *command, *next; struct event_command *p; int ret = -EINVAL; + next = buff = skip_spaces(buff); command = strsep(&next, ": \t"); + if (next) { + next = skip_spaces(next); + if (!*next) + next = NULL; + } command = (command[0] != '!') ? command : command + 1; mutex_lock(&trigger_cmd_mutex); @@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops, int ret; /* separate the trigger from the filter (t:n [if filter]) */ - if (param && isdigit(param[0])) + if (param && isdigit(param[0])) { trigger = strsep(¶m, " \t"); + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } + } trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger); @@ -1342,6 +1354,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops, trigger = strsep(¶m, " \t"); if (!trigger) return -EINVAL; + if (param) { + param = skip_spaces(param); + if (!*param) + param = NULL; + } system = strsep(&trigger, ":"); if (!trigger) diff --git a/mm/slab_common.c b/mm/slab_common.c index 386898fb0e70..b873a85bbea1 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1473,7 +1473,7 @@ void kzfree(const void *p) if (unlikely(ZERO_OR_NULL_PTR(mem))) return; ks = ksize(mem); - memset(mem, 0, ks); + memzero_explicit(mem, ks); kfree(mem); } EXPORT_SYMBOL(kzfree); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index e870cfc85b14..14ff034e561c 100644 --- 
a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -190,8 +190,8 @@ struct net_bridge_port_group { struct rcu_head rcu; struct timer_list timer; struct br_ip addr; + unsigned char eth_addr[ETH_ALEN] __aligned(2); unsigned char flags; - unsigned char eth_addr[ETH_ALEN]; }; struct net_bridge_mdb_entry diff --git a/net/core/dev.c b/net/core/dev.c index b4c8c9b777c4..4af3fa399e22 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7770,6 +7770,13 @@ int register_netdevice(struct net_device *dev) rcu_barrier(); dev->reg_state = NETREG_UNREGISTERED; + /* We should put the kobject that hold in + * netdev_unregister_kobject(), otherwise + * the net device cannot be freed when + * driver calls free_netdev(), because the + * kobject is being hold. + */ + kobject_put(&dev->dev.kobj); } /* * Prevent userspace races by waiting until the network diff --git a/net/core/sock.c b/net/core/sock.c index 074ac45dc36b..970c520b2a9f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1540,6 +1540,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, cgroup_sk_alloc(&sk->sk_cgrp_data); sock_update_classid(&sk->sk_cgrp_data); sock_update_netprioidx(&sk->sk_cgrp_data); + sk_tx_queue_clear(sk); } return sk; @@ -1743,6 +1744,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) */ sk_refcnt_debug_inc(newsk); sk_set_socket(newsk, NULL); + sk_tx_queue_clear(newsk); newsk->sk_wq = NULL; if (newsk->sk_prot->sockets_allocated) diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index eff703cb13b6..bc233fdfae0f 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -839,7 +839,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, if (fl4.flowi4_scope < RT_SCOPE_LINK) fl4.flowi4_scope = RT_SCOPE_LINK; - if (cfg->fc_table) + if (cfg->fc_table && cfg->fc_table != RT_TABLE_MAIN) tbl = fib_get_table(net, cfg->fc_table); if (tbl) diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 
f6793017a20d..44cc17c43a6b 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -98,9 +98,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, __be32 remote, __be32 local, __be32 key) { - unsigned int hash; struct ip_tunnel *t, *cand = NULL; struct hlist_head *head; + struct net_device *ndev; + unsigned int hash; hash = ip_tunnel_hash(key, remote); head = &itn->tunnels[hash]; @@ -175,8 +176,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, if (t && t->dev->flags & IFF_UP) return t; - if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP) - return netdev_priv(itn->fb_tunnel_dev); + ndev = READ_ONCE(itn->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -1211,9 +1213,9 @@ void ip_tunnel_uninit(struct net_device *dev) struct ip_tunnel_net *itn; itn = net_generic(net, tunnel->ip_tnl_net_id); - /* fb_tunnel_dev will be unregisted in net-exit call. */ - if (itn->fb_tunnel_dev != dev) - ip_tunnel_del(itn, netdev_priv(dev)); + ip_tunnel_del(itn, netdev_priv(dev)); + if (itn->fb_tunnel_dev == dev) + WRITE_ONCE(itn->fb_tunnel_dev, NULL); dst_cache_reset(&tunnel->dst_cache); } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 78bfadfcf342..8b5ba0a5cd38 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -403,6 +403,8 @@ static void hystart_update(struct sock *sk, u32 delay) if (hystart_detect & HYSTART_DELAY) { /* obtain the minimum delay of more than sampling packets */ + if (ca->curr_rtt > delay) + ca->curr_rtt = delay; if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { if (ca->curr_rtt == 0 || ca->curr_rtt > delay) ca->curr_rtt = delay; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 42eb50b0e504..ed24caa1aebf 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4527,7 +4527,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) { coalesce_done: - 
tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. + */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); skb = NULL; goto add_sack; @@ -4611,7 +4615,11 @@ add_sack: tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) { - tcp_grow_window(sk, skb); + /* For non sack flows, do not grow window to force DUPACK + * and trigger fast retransmit. + */ + if (tcp_is_sack(tp)) + tcp_grow_window(sk, skb); skb_condense(skb); skb_set_owner_r(skb, sk); } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 726ba41133a3..e07cc2cfc1a6 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -124,6 +124,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, int dev_type = (gre_proto == htons(ETH_P_TEB)) ? ARPHRD_ETHER : ARPHRD_IP6GRE; int score, cand_score = 4; + struct net_device *ndev; for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) { if (!ipv6_addr_equal(local, &t->parms.laddr) || @@ -226,9 +227,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, if (cand) return cand; - dev = ign->fb_tunnel_dev; - if (dev->flags & IFF_UP) - return netdev_priv(dev); + ndev = READ_ONCE(ign->fb_tunnel_dev); + if (ndev && ndev->flags & IFF_UP) + return netdev_priv(ndev); return NULL; } @@ -364,6 +365,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); ip6gre_tunnel_unlink(ign, t); + if (ign->fb_tunnel_dev == dev) + WRITE_ONCE(ign->fb_tunnel_dev, NULL); dst_cache_reset(&t->dst_cache); dev_put(dev); } diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 611dc5d55fa0..959057515fc9 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2599,6 +2599,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev) idev->mc_list = i->next; write_unlock_bh(&idev->lock); + ip6_mc_clear_src(i); ma_put(i); write_lock_bh(&idev->lock); } diff --git a/net/netfilter/ipset/ip_set_core.c 
b/net/netfilter/ipset/ip_set_core.c index c2b21c9c1229..5c59bbad8d19 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -381,6 +381,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len, for (id = 0; id < IPSET_EXT_ID_MAX; id++) { if (!add_extension(id, cadt_flags, tb)) continue; + if (align < ip_set_extensions[id].align) + align = ip_set_extensions[id].align; len = ALIGN(len, ip_set_extensions[id].align); set->offset[id] = len; set->extensions |= ip_set_extensions[id].type; diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 2dd13f5c47c8..61425179780c 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -26,6 +26,11 @@ #include #include "ar-internal.h" +static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call, + unsigned long user_call_ID) +{ +} + /* * Preallocate a single service call, connection and peer and, if possible, * give them a user ID and attach the user's side of the ID to them. 
@@ -227,6 +232,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) if (rx->discard_new_call) { _debug("discard %lx", call->user_call_ID); rx->discard_new_call(call, call->user_call_ID); + if (call->notify_rx) + call->notify_rx = rxrpc_dummy_notify; rxrpc_put_call(call, rxrpc_call_put_kernel); } rxrpc_call_completed(call); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 18ce6f97462b..98285b117a7c 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -664,13 +664,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU), rwind, ntohl(ackinfo->jumbo_max)); + if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) + rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (call->tx_winsize != rwind) { - if (rwind > RXRPC_RXTX_BUFF_SIZE - 1) - rwind = RXRPC_RXTX_BUFF_SIZE - 1; if (rwind > call->tx_winsize) wake = true; - trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, - ntohl(ackinfo->rwind), wake); + trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake); call->tx_winsize = rwind; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 21b981abbacb..091a9746627f 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -341,6 +341,7 @@ void __netdev_watchdog_up(struct net_device *dev) dev_hold(dev); } } +EXPORT_SYMBOL_GPL(__netdev_watchdog_up); static void dev_watchdog_up(struct net_device *dev) { diff --git a/net/sctp/associola.c b/net/sctp/associola.c index dd1a3bd80be5..0a5764016721 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1598,12 +1598,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, enum sctp_scope scope, gfp_t gfp) { + struct sock *sk = asoc->base.sk; int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ - flags = (PF_INET6 == asoc->base.sk->sk_family) ? 
SCTP_ADDR6_ALLOWED : 0; + flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; + if (!inet_v6_ipv6only(sk)) + flags |= SCTP_ADDR4_ALLOWED; if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; if (asoc->peer.ipv6_address) diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 7df3704982f5..38d01cfb313e 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -453,6 +453,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest, * well as the remote peer. */ if ((((AF_INET == addr->sa.sa_family) && + (flags & SCTP_ADDR4_ALLOWED) && (flags & SCTP_ADDR4_PEERSUPP))) || (((AF_INET6 == addr->sa.sa_family) && (flags & SCTP_ADDR6_ALLOWED) && diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 785456df7505..8fe9c0646205 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -213,7 +213,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, * sock as well as the remote peer. */ if (addr->a.sa.sa_family == AF_INET && - !(copy_flags & SCTP_ADDR4_PEERSUPP)) + (!(copy_flags & SCTP_ADDR4_ALLOWED) || + !(copy_flags & SCTP_ADDR4_PEERSUPP))) continue; if (addr->a.sa.sa_family == AF_INET6 && (!(copy_flags & SCTP_ADDR6_ALLOWED) || diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 34f94052c519..137f92bfafac 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1347,6 +1347,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data) q.len = strlen(gssd_dummy_clnt_dir[0].name); clnt_dentry = d_hash_and_lookup(gssd_dentry, &q); if (!clnt_dentry) { + __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1); pipe_dentry = ERR_PTR(-ENOENT); goto out; } diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 4f382805eb9c..87cf0b933f99 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1036,6 +1036,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->head[0].iov_len; + subbuf->head[0].iov_base = 
buf->head[0].iov_base; subbuf->head[0].iov_len = 0; } @@ -1048,6 +1049,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->page_len; + subbuf->pages = buf->pages; + subbuf->page_base = 0; subbuf->page_len = 0; } @@ -1059,6 +1062,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, base = 0; } else { base -= buf->tail[0].iov_len; + subbuf->tail[0].iov_base = buf->tail[0].iov_base; subbuf->tail[0].iov_len = 0; } diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index a64029ae9598..71ed64f0b0ca 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -82,20 +82,21 @@ cc-cross-prefix = \ fi))) # output directory for tests below -TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/) +TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$ # try-run # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise) # Exit code chooses option. "$$TMP" serves as a temporary file and is # automatically cleaned up. 
try-run = $(shell set -e; \ - TMP="$(TMPOUT).$$$$.tmp"; \ - TMPO="$(TMPOUT).$$$$.o"; \ + TMP=$(TMPOUT)/tmp; \ + TMPO=$(TMPOUT)/tmp.o; \ + mkdir -p $(TMPOUT); \ + trap "rm -rf $(TMPOUT)" EXIT; \ if ($(1)) >/dev/null 2>&1; \ then echo "$(2)"; \ else echo "$(3)"; \ - fi; \ - rm -f "$$TMP" "$$TMPO") + fi) # as-option # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index dc03ac68dd5a..0c80ab941fd3 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -381,7 +381,7 @@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ quiet_cmd_lz4 = LZ4 $@ cmd_lz4 = (cat $(filter-out FORCE,$^) | \ - lz4 -c -l -12 --favor-decSpeed stdin stdout && \ + lz4 -l -12 --favor-decSpeed stdin stdout && \ $(call size_append, $(filter-out FORCE,$^))) > $@ || \ (rm -f $@ ; false) diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index c58df3375390..b9dcf7ec95a0 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -123,11 +123,11 @@ static int apparmor_ptrace_traceme(struct task_struct *parent) struct aa_label *tracer, *tracee; int error; - tracee = begin_current_label_crit_section(); + tracee = __begin_current_label_crit_section(); tracer = aa_get_task_label(parent); error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE); aa_put_label(tracer); - end_current_label_crit_section(tracee); + __end_current_label_crit_section(tracee); return error; } diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 6b4ebaefd8f8..9e8cfc409b4b 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3861,6 +3861,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP", patch_nvhdmi), 
+HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c index 8a2e3bbce3a1..ad16c8310dd3 100644 --- a/sound/soc/rockchip/rockchip_pdm.c +++ b/sound/soc/rockchip/rockchip_pdm.c @@ -478,8 +478,10 @@ static int rockchip_pdm_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(pdm->regmap); diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 51692b762201..20f32c260fee 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -603,8 +603,9 @@ static int check_matrix_bitmap(unsigned char *bmap, * if failed, give up and free the control instance. 
*/ -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl) +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info) { struct usb_mixer_interface *mixer = list->mixer; int err; @@ -617,6 +618,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, return err; } list->kctl = kctl; + list->is_std_info = is_std_info; list->next_id_elem = mixer->id_elems[list->id]; mixer->id_elems[list->id] = list; return 0; @@ -2750,15 +2752,23 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) { struct usb_mixer_elem_list *list; - for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) + for_each_mixer_elem(list, mixer, unitid) { + struct usb_mixer_elem_info *info; + + if (!list->is_std_info) + continue; + info = mixer_elem_list_to_info(list); + /* invalidate cache, so the value is read from the device */ + info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &list->kctl->id); + } } static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, struct usb_mixer_elem_list *list) { - struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list; + struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16"}; snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, " @@ -2784,8 +2794,7 @@ static void snd_usb_mixer_proc_read(struct snd_info_entry *entry, mixer->ignore_ctl_error); snd_iprintf(buffer, "Card: %s\n", chip->card->longname); for (unitid = 0; unitid < MAX_ID_ELEMS; unitid++) { - for (list = mixer->id_elems[unitid]; list; - list = list->next_id_elem) { + for_each_mixer_elem(list, mixer, unitid) { snd_iprintf(buffer, " Unit: %i\n", list->id); if (list->kctl) snd_iprintf(buffer, @@ -2815,19 +2824,21 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, return; } - for (list = 
mixer->id_elems[unitid]; list; list = list->next_id_elem) + for_each_mixer_elem(list, mixer, unitid) count++; if (count == 0) return; - for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) { + for_each_mixer_elem(list, mixer, unitid) { struct usb_mixer_elem_info *info; if (!list->kctl) continue; + if (!list->is_std_info) + continue; - info = (struct usb_mixer_elem_info *)list; + info = mixer_elem_list_to_info(list); if (count > 1 && info->control != control) continue; @@ -3050,7 +3061,7 @@ int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer) static int restore_mixer_value(struct usb_mixer_elem_list *list) { - struct usb_mixer_elem_info *cval = (struct usb_mixer_elem_info *)list; + struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); int c, err, idx; if (cval->cmask) { @@ -3086,8 +3097,7 @@ int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume) if (reset_resume) { /* restore cached mixer values */ for (id = 0; id < MAX_ID_ELEMS; id++) { - for (list = mixer->id_elems[id]; list; - list = list->next_id_elem) { + for_each_mixer_elem(list, mixer, id) { if (list->resume) { err = list->resume(list); if (err < 0) diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index ba27f7ade670..7c824a44589b 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -49,10 +49,17 @@ struct usb_mixer_elem_list { struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */ struct snd_kcontrol *kctl; unsigned int id; + bool is_std_info; usb_mixer_elem_dump_func_t dump; usb_mixer_elem_resume_func_t resume; }; +/* iterate over mixer element list of the given unit id */ +#define for_each_mixer_elem(list, mixer, id) \ + for ((list) = (mixer)->id_elems[id]; (list); (list) = (list)->next_id_elem) +#define mixer_elem_list_to_info(list) \ + container_of(list, struct usb_mixer_elem_info, head) + struct usb_mixer_elem_info { struct usb_mixer_elem_list head; unsigned int control; /* CS or ICN (high byte) */ @@ -80,8 +87,12 
@@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid); int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int value_set); -int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list, - struct snd_kcontrol *kctl); +int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list, + struct snd_kcontrol *kctl, + bool is_std_info); + +#define snd_usb_mixer_add_control(list, kctl) \ + snd_usb_mixer_add_list(list, kctl, true) void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list, struct usb_mixer_interface *mixer, diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index b9ea4a42aee4..5604cce30a58 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -169,7 +169,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer, return -ENOMEM; } kctl->private_free = snd_usb_mixer_elem_free; - return snd_usb_mixer_add_control(list, kctl); + /* don't use snd_usb_mixer_add_control() here, this is a special list element */ + return snd_usb_mixer_add_list(list, kctl, false); } /* @@ -1171,7 +1172,7 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip, int unitid = 12; /* SamleRate ExtensionUnit ID */ list_for_each_entry(mixer, &chip->mixer_list, list) { - cval = (struct usb_mixer_elem_info *)mixer->id_elems[unitid]; + cval = mixer_elem_list_to_info(mixer->id_elems[unitid]); if (cval) { snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, cval->control << 8, diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c index c33e2378089d..4aeb9488a0c9 100644 --- a/sound/usb/mixer_scarlett.c +++ b/sound/usb/mixer_scarlett.c @@ -287,8 +287,7 @@ static int scarlett_ctl_switch_put(struct snd_kcontrol *kctl, static int scarlett_ctl_resume(struct usb_mixer_elem_list *list) { - struct usb_mixer_elem_info *elem = - container_of(list, struct usb_mixer_elem_info, head); + struct usb_mixer_elem_info *elem = mixer_elem_list_to_info(list); int i; for (i = 0; i < 
elem->channels; i++) @@ -447,8 +446,7 @@ static int scarlett_ctl_enum_put(struct snd_kcontrol *kctl, static int scarlett_ctl_enum_resume(struct usb_mixer_elem_list *list) { - struct usb_mixer_elem_info *elem = - container_of(list, struct usb_mixer_elem_info, head); + struct usb_mixer_elem_info *elem = mixer_elem_list_to_info(list); if (elem->cached) snd_usb_set_cur_mix_value(elem, 0, 0, *elem->cache_val); diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index f29a8ed4f856..cd36394e27ae 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1164,6 +1164,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) static bool is_itf_usb_dsd_2alts_dac(unsigned int id) { switch (id) { + case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */ case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ diff --git a/techpack/audio/4.0/dsp/codecs/audio_alac.c b/techpack/audio/4.0/dsp/codecs/audio_alac.c index a3e473f7082e..cda7995ed0f9 100644 --- a/techpack/audio/4.0/dsp/codecs/audio_alac.c +++ b/techpack/audio/4.0/dsp/codecs/audio_alac.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -10,6 +10,7 @@ static struct miscdevice audio_alac_misc; static struct ws_mgr audio_alac_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_alac_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -20,7 +21,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_alac_debug_fops); } - +#endif static int alac_channel_map(u8 *channel_mapping, uint32_t channels); static long audio_ioctl_shared(struct file *file, unsigned int cmd, @@ -323,10 +324,12 @@ static int audio_open(struct inode *inode, struct file *file) } snprintf(name, sizeof(name), "msm_alac_%04x", audio->ac->session); +#ifdef CONFIG_DEBUG_FS audio->dentry = config_debugfs_create_file(name, (void *)audio); if (IS_ERR_OR_NULL(audio->dentry)) pr_debug("debugfs_create_file failed\n"); +#endif pr_debug("%s:alacdec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); diff --git a/techpack/audio/4.0/dsp/codecs/audio_ape.c b/techpack/audio/4.0/dsp/codecs/audio_ape.c index 319828bd5465..939f1e1ae2b9 100644 --- a/techpack/audio/4.0/dsp/codecs/audio_ape.c +++ b/techpack/audio/4.0/dsp/codecs/audio_ape.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -10,6 +10,7 @@ static struct miscdevice audio_ape_misc; static struct ws_mgr audio_ape_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_ape_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -19,6 +20,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_ape_debug_fops); } +#endif static long audio_ioctl_shared(struct file *file, unsigned int cmd, void *arg) @@ -305,10 +307,12 @@ static int audio_open(struct inode *inode, struct file *file) } snprintf(name, sizeof(name), "msm_ape_%04x", audio->ac->session); +#ifdef CONFIG_DEBUG_FS audio->dentry = config_debugfs_create_file(name, (void *)audio); if (IS_ERR_OR_NULL(audio->dentry)) pr_debug("debugfs_create_file failed\n"); +#endif pr_debug("%s:apedec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); diff --git a/techpack/audio/4.0/dsp/codecs/audio_g711alaw.c b/techpack/audio/4.0/dsp/codecs/audio_g711alaw.c index 27d7fe0b35dd..7cf6810b1462 100644 --- a/techpack/audio/4.0/dsp/codecs/audio_g711alaw.c +++ b/techpack/audio/4.0/dsp/codecs/audio_g711alaw.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, 2019-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -10,6 +10,7 @@ static struct miscdevice audio_g711alaw_misc; static struct ws_mgr audio_g711_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_g711_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -20,6 +21,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_g711_debug_fops); } +#endif static int g711_channel_map(u8 *channel_mapping, uint32_t channels); @@ -278,10 +280,12 @@ static int audio_open(struct inode *inode, struct file *file) } snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session); +#ifdef CONFIG_DEBUG_FS audio->dentry = config_debugfs_create_file(name, (void *)audio); if (IS_ERR_OR_NULL(audio->dentry)) pr_debug("%s: debugfs_create_file failed\n", __func__); +#endif pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); diff --git a/techpack/audio/4.0/dsp/codecs/audio_g711mlaw.c b/techpack/audio/4.0/dsp/codecs/audio_g711mlaw.c index 57c7f1ae7e09..1c16eef81954 100644 --- a/techpack/audio/4.0/dsp/codecs/audio_g711mlaw.c +++ b/techpack/audio/4.0/dsp/codecs/audio_g711mlaw.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, 2019-2020, The Linux Foundation. All rights reserved. 
*/ #include @@ -10,6 +10,7 @@ static struct miscdevice audio_g711mlaw_misc; static struct ws_mgr audio_g711_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_g711_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -20,6 +21,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_g711_debug_fops); } +#endif static int g711_channel_map(u8 *channel_mapping, uint32_t channels); @@ -277,10 +279,12 @@ static int audio_open(struct inode *inode, struct file *file) } snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session); +#ifdef CONFIG_DEBUG_FS audio->dentry = config_debugfs_create_file(name, (void *)audio); if (IS_ERR_OR_NULL(audio->dentry)) pr_debug("%s: debugfs_create_file failed\n", __func__); +#endif pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); diff --git a/techpack/audio/4.0/dsp/q6asm.c b/techpack/audio/4.0/dsp/q6asm.c index 8314df747e9f..bb1df5b24432 100644 --- a/techpack/audio/4.0/dsp/q6asm.c +++ b/techpack/audio/4.0/dsp/q6asm.c @@ -144,56 +144,6 @@ struct generic_get_data_ { }; static struct generic_get_data_ *generic_get_data; -#ifdef CONFIG_DEBUG_FS -#define OUT_BUFFER_SIZE 56 -#define IN_BUFFER_SIZE 24 - -static struct timeval out_cold_tv; -static struct timeval out_warm_tv; -static struct timeval out_cont_tv; -static struct timeval in_cont_tv; -static long out_enable_flag; -static long in_enable_flag; -static struct dentry *out_dentry; -static struct dentry *in_dentry; -static int in_cont_index; -/*This var is used to keep track of first write done for cold output latency */ -static int out_cold_index; -static char *out_buffer; -static char *in_buffer; - -static uint32_t adsp_reg_event_opcode[] = { - ASM_STREAM_CMD_REGISTER_PP_EVENTS, - ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS, - ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE }; - -static 
uint32_t adsp_raise_event_opcode[] = { - ASM_STREAM_PP_EVENT, - ASM_STREAM_CMD_ENCDEC_EVENTS, - ASM_IEC_61937_MEDIA_FMT_EVENT }; - -static int is_adsp_reg_event(uint32_t cmd) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(adsp_reg_event_opcode); i++) { - if (cmd == adsp_reg_event_opcode[i]) - return i; - } - return -EINVAL; -} - -static int is_adsp_raise_event(uint32_t cmd) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(adsp_raise_event_opcode); i++) { - if (cmd == adsp_raise_event_opcode[i]) - return i; - } - return -EINVAL; -} - static inline void q6asm_set_flag_in_token(union asm_token_struct *asm_token, int flag, int flag_offset) { @@ -276,6 +226,56 @@ uint8_t q6asm_get_stream_id_from_token(uint32_t token) } EXPORT_SYMBOL(q6asm_get_stream_id_from_token); +static uint32_t adsp_reg_event_opcode[] = { + ASM_STREAM_CMD_REGISTER_PP_EVENTS, + ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS, + ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE }; + +static int is_adsp_reg_event(uint32_t cmd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adsp_reg_event_opcode); i++) { + if (cmd == adsp_reg_event_opcode[i]) + return i; + } + return -EINVAL; +} + +static uint32_t adsp_raise_event_opcode[] = { + ASM_STREAM_PP_EVENT, + ASM_STREAM_CMD_ENCDEC_EVENTS, + ASM_IEC_61937_MEDIA_FMT_EVENT }; + +static int is_adsp_raise_event(uint32_t cmd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adsp_raise_event_opcode); i++) { + if (cmd == adsp_raise_event_opcode[i]) + return i; + } + return -EINVAL; +} + +#ifdef CONFIG_DEBUG_FS +#define OUT_BUFFER_SIZE 56 +#define IN_BUFFER_SIZE 24 + +static struct timeval out_cold_tv; +static struct timeval out_warm_tv; +static struct timeval out_cont_tv; +static struct timeval in_cont_tv; +static long out_enable_flag; +static long in_enable_flag; +static struct dentry *out_dentry; +static struct dentry *in_dentry; +static int in_cont_index; +/*This var is used to keep track of first write done for cold output latency */ +static int out_cold_index; +static char *out_buffer; +static char 
*in_buffer; + static int audio_output_latency_dbgfs_open(struct inode *inode, struct file *file) { diff --git a/techpack/audio/4.0/ipc/apr.c b/techpack/audio/4.0/ipc/apr.c index 0ea269454035..033f6b24f45f 100644 --- a/techpack/audio/4.0/ipc/apr.c +++ b/techpack/audio/4.0/ipc/apr.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2010-2014, 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2014, 2016-2020, The Linux Foundation. All rights reserved. */ #include @@ -1117,9 +1117,9 @@ static int __init apr_debug_init(void) } #else static int __init apr_debug_init(void) -( +{ return 0; -) +} #endif static void apr_cleanup(void) @@ -1140,7 +1140,9 @@ static void apr_cleanup(void) mutex_destroy(&client[i][j].svc[k].m_lock); } } +#ifdef CONFIG_DEBUG_FS debugfs_remove(debugfs_apr_debug); +#endif } static int apr_probe(struct platform_device *pdev) diff --git a/techpack/audio/4.0/ipc/apr_vm.c b/techpack/audio/4.0/ipc/apr_vm.c index f94e3a22db64..1b66013b8cf4 100644 --- a/techpack/audio/4.0/ipc/apr_vm.c +++ b/techpack/audio/4.0/ipc/apr_vm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2010-2014, 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2014, 2016-2020 The Linux Foundation. All rights reserved. 
*/ #include @@ -1308,9 +1308,9 @@ static int __init apr_debug_init(void) } #else static int __init apr_debug_init(void) -( +{ return 0; -) +} #endif static void apr_cleanup(void) @@ -1331,7 +1331,9 @@ static void apr_cleanup(void) mutex_destroy(&client[i][j].svc[k].m_lock); } } +#ifdef CONFIG_DEBUG_FS debugfs_remove(debugfs_apr_debug); +#endif } static int apr_probe(struct platform_device *pdev) diff --git a/techpack/audio/dsp/codecs/audio_alac.c b/techpack/audio/dsp/codecs/audio_alac.c index 1bc1a3eb530a..30dfaeaf8db8 100644 --- a/techpack/audio/dsp/codecs/audio_alac.c +++ b/techpack/audio/dsp/codecs/audio_alac.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,6 +19,7 @@ static struct miscdevice audio_alac_misc; static struct ws_mgr audio_alac_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_alac_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -29,6 +30,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_alac_debug_fops); } +#endif static int alac_channel_map(u8 *channel_mapping, uint32_t channels); @@ -332,10 +334,12 @@ static int audio_open(struct inode *inode, struct file *file) } snprintf(name, sizeof(name), "msm_alac_%04x", audio->ac->session); +#ifdef CONFIG_DEBUG_FS audio->dentry = config_debugfs_create_file(name, (void *)audio); if (IS_ERR_OR_NULL(audio->dentry)) pr_debug("debugfs_create_file failed\n"); +#endif pr_debug("%s:alacdec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); diff --git a/techpack/audio/dsp/codecs/audio_ape.c b/techpack/audio/dsp/codecs/audio_ape.c index 3f81b535dad6..1bdd40dc9d20 100644 
--- a/techpack/audio/dsp/codecs/audio_ape.c +++ b/techpack/audio/dsp/codecs/audio_ape.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,6 +19,7 @@ static struct miscdevice audio_ape_misc; static struct ws_mgr audio_ape_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_ape_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -28,6 +29,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_ape_debug_fops); } +#endif static long audio_ioctl_shared(struct file *file, unsigned int cmd, void *arg) @@ -314,10 +316,12 @@ static int audio_open(struct inode *inode, struct file *file) } snprintf(name, sizeof(name), "msm_ape_%04x", audio->ac->session); +#ifdef CONFIG_DEBUG_FS audio->dentry = config_debugfs_create_file(name, (void *)audio); if (IS_ERR_OR_NULL(audio->dentry)) pr_debug("debugfs_create_file failed\n"); +#endif pr_debug("%s:apedec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); diff --git a/techpack/audio/dsp/codecs/audio_g711alaw.c b/techpack/audio/dsp/codecs/audio_g711alaw.c index fec46d24e2e8..bc4b1b4a487a 100644 --- a/techpack/audio/dsp/codecs/audio_g711alaw.c +++ b/techpack/audio/dsp/codecs/audio_g711alaw.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,6 +19,7 @@ static struct miscdevice audio_g711alaw_misc; static struct ws_mgr audio_g711_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_g711_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -29,6 +30,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_g711_debug_fops); } +#endif static int g711_channel_map(u8 *channel_mapping, uint32_t channels); @@ -287,10 +289,12 @@ static int audio_open(struct inode *inode, struct file *file) } snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session); +#ifdef CONFIG_DEBUG_FS audio->dentry = config_debugfs_create_file(name, (void *)audio); if (IS_ERR_OR_NULL(audio->dentry)) pr_debug("%s: debugfs_create_file failed\n", __func__); +#endif pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); diff --git a/techpack/audio/dsp/codecs/audio_g711mlaw.c b/techpack/audio/dsp/codecs/audio_g711mlaw.c index c27768ae269f..b4eb7def1b12 100644 --- a/techpack/audio/dsp/codecs/audio_g711mlaw.c +++ b/techpack/audio/dsp/codecs/audio_g711mlaw.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,6 +19,7 @@ static struct miscdevice audio_g711mlaw_misc; static struct ws_mgr audio_g711_ws_mgr; +#ifdef CONFIG_DEBUG_FS static const struct file_operations audio_g711_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, @@ -29,6 +30,7 @@ static struct dentry *config_debugfs_create_file(const char *name, void *data) return debugfs_create_file(name, S_IFREG | 0444, NULL, (void *)data, &audio_g711_debug_fops); } +#endif static int g711_channel_map(u8 *channel_mapping, uint32_t channels); @@ -286,10 +288,12 @@ static int audio_open(struct inode *inode, struct file *file) } snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session); +#ifdef CONFIG_DEBUG_FS audio->dentry = config_debugfs_create_file(name, (void *)audio); if (IS_ERR_OR_NULL(audio->dentry)) pr_debug("%s: debugfs_create_file failed\n", __func__); +#endif pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); diff --git a/techpack/audio/dsp/q6asm.c b/techpack/audio/dsp/q6asm.c index 957a8546c708..f89b01565870 100644 --- a/techpack/audio/dsp/q6asm.c +++ b/techpack/audio/dsp/q6asm.c @@ -146,56 +146,6 @@ struct generic_get_data_ { }; static struct generic_get_data_ *generic_get_data; -#ifdef CONFIG_DEBUG_FS -#define OUT_BUFFER_SIZE 56 -#define IN_BUFFER_SIZE 24 - -static struct timeval out_cold_tv; -static struct timeval out_warm_tv; -static struct timeval out_cont_tv; -static struct timeval in_cont_tv; -static long out_enable_flag; -static long in_enable_flag; -static struct dentry *out_dentry; -static struct dentry *in_dentry; -static int in_cont_index; -/*This var is used to keep track of first write done for cold output latency */ -static int out_cold_index; -static char *out_buffer; -static char *in_buffer; - -static uint32_t adsp_reg_event_opcode[] = { - 
ASM_STREAM_CMD_REGISTER_PP_EVENTS, - ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS, - ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE }; - -static uint32_t adsp_raise_event_opcode[] = { - ASM_STREAM_PP_EVENT, - ASM_STREAM_CMD_ENCDEC_EVENTS, - ASM_IEC_61937_MEDIA_FMT_EVENT }; - -static int is_adsp_reg_event(uint32_t cmd) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(adsp_reg_event_opcode); i++) { - if (cmd == adsp_reg_event_opcode[i]) - return i; - } - return -EINVAL; -} - -static int is_adsp_raise_event(uint32_t cmd) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(adsp_raise_event_opcode); i++) { - if (cmd == adsp_raise_event_opcode[i]) - return i; - } - return -EINVAL; -} - static inline void q6asm_set_flag_in_token(union asm_token_struct *asm_token, int flag, int flag_offset) { @@ -278,6 +228,56 @@ uint8_t q6asm_get_stream_id_from_token(uint32_t token) } EXPORT_SYMBOL(q6asm_get_stream_id_from_token); +static uint32_t adsp_reg_event_opcode[] = { + ASM_STREAM_CMD_REGISTER_PP_EVENTS, + ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS, + ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE }; + +static int is_adsp_reg_event(uint32_t cmd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adsp_reg_event_opcode); i++) { + if (cmd == adsp_reg_event_opcode[i]) + return i; + } + return -EINVAL; +} + +static uint32_t adsp_raise_event_opcode[] = { + ASM_STREAM_PP_EVENT, + ASM_STREAM_CMD_ENCDEC_EVENTS, + ASM_IEC_61937_MEDIA_FMT_EVENT }; + +static int is_adsp_raise_event(uint32_t cmd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adsp_raise_event_opcode); i++) { + if (cmd == adsp_raise_event_opcode[i]) + return i; + } + return -EINVAL; +} + +#ifdef CONFIG_DEBUG_FS +#define OUT_BUFFER_SIZE 56 +#define IN_BUFFER_SIZE 24 + +static struct timeval out_cold_tv; +static struct timeval out_warm_tv; +static struct timeval out_cont_tv; +static struct timeval in_cont_tv; +static long out_enable_flag; +static long in_enable_flag; +static struct dentry *out_dentry; +static struct dentry *in_dentry; +static int in_cont_index; +/*This var 
is used to keep track of first write done for cold output latency */ +static int out_cold_index; +static char *out_buffer; +static char *in_buffer; + static int audio_output_latency_dbgfs_open(struct inode *inode, struct file *file) { diff --git a/techpack/audio/ipc/apr.c b/techpack/audio/ipc/apr.c index b92482006f50..9390c966753e 100644 --- a/techpack/audio/ipc/apr.c +++ b/techpack/audio/ipc/apr.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2014, 2016-2019 The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2014, 2016-2020 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1119,9 +1119,9 @@ static int __init apr_debug_init(void) } #else static int __init apr_debug_init(void) -( +{ return 0; -) +} #endif static void apr_cleanup(void) @@ -1142,7 +1142,9 @@ static void apr_cleanup(void) mutex_destroy(&client[i][j].svc[k].m_lock); } } +#ifdef CONFIG_DEBUG_FS debugfs_remove(debugfs_apr_debug); +#endif } static int apr_probe(struct platform_device *pdev) diff --git a/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c b/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c index a2e32ec55d87..da9209746be6 100644 --- a/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c +++ b/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_dev.c @@ -1134,11 +1134,11 @@ static INT drop_tx_status_enabled(void) * \retval -1 Failure */ -static INT config_sub_second_increment(ULONG ptp_clock) +static INT config_sub_second_increment(u64 ptp_clock) { - ULONG VARMAC_TCR; - double ss_inc = 0; - double sns_inc = 0; + ULONG VARMAC_TCR = 0; + u64 ss_inc = 0; + u64 sns_inc = 0; MAC_TCR_RGRD(VARMAC_TCR); @@ -1147,12 +1147,17 @@ static INT config_sub_second_increment(ULONG ptp_clock) /* where, ptp_clock = 50MHz if FINE correction */ /* and ptp_clock = DWC_ETH_QOS_SYSCLOCK if COARSE correction */ if (GET_VALUE(VARMAC_TCR, MAC_TCR_TSCFUPDT_LPOS, 
MAC_TCR_TSCFUPDT_HPOS) == 1) { - EMACDBG("Using PTP clock %ld MHz\n", ptp_clock); - ss_inc = (double)1000000000.0 / (double)ptp_clock; + EMACDBG("Using PTP clock %lu MHz\n", ptp_clock); + ss_inc = div_u64((1 * 1000000000ull), ptp_clock); + sns_inc = 1000000000ull - (ss_inc * ptp_clock); //take remainder + sns_inc = div_u64((sns_inc * 256), ptp_clock); //sns_inc needs to be multiplied by 2^8, per spec. + } else { EMACDBG("Using SYSCLOCK for coarse correction\n"); - ss_inc = (double)1000000000.0 / (double)DWC_ETH_QOS_SYSCLOCK; + ss_inc = div_u64((1 * 1000000000ull), DWC_ETH_QOS_SYSCLOCK); + sns_inc = 1000000000ull - (ss_inc * DWC_ETH_QOS_SYSCLOCK); //take remainder + sns_inc = div_u64((sns_inc * 256), DWC_ETH_QOS_SYSCLOCK); //sns_inc needs to be multiplied by 2^8, per spec. } /* 0.465ns accuracy */ @@ -1160,16 +1165,12 @@ static INT config_sub_second_increment(ULONG ptp_clock) VARMAC_TCR, MAC_TCR_TSCTRLSSR_LPOS, MAC_TCR_TSCTRLSSR_HPOS) == 0) { EMACDBG("using 0.465 ns accuracy"); - ss_inc /= 0.465; + ss_inc = div_u64((ss_inc * 1000), 465); } - sns_inc = ss_inc - (int)ss_inc; // take remainder - sns_inc *= 256.0; // sns_inc needs to be multiplied by 2^8, per spec. - sns_inc += 0.5; // round to nearest int value. 
- - MAC_SSIR_SSINC_UDFWR((int)ss_inc); - MAC_SSIR_SNSINC_UDFWR((int)sns_inc); - EMACDBG("ss_inc = %d, sns_inc = %d\n", (int)ss_inc, (int)sns_inc); + MAC_SSIR_SSINC_UDFWR(ss_inc); + MAC_SSIR_SNSINC_UDFWR(sns_inc); + EMACDBG("ss_inc = %lu, sns_inc = %lu\n", ss_inc, sns_inc); return Y_SUCCESS; } diff --git a/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c b/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c index d2026101c6c0..4fc086a59d58 100644 --- a/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c +++ b/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.c @@ -4047,6 +4047,7 @@ int DWC_ETH_QOS_poll_mq(struct napi_struct *napi, int budget) struct DWC_ETH_QOS_prv_data *pdata = rx_queue->pdata; /* divide the budget evenly among all the queues */ int per_q_budget = budget / DWC_ETH_QOS_RX_QUEUE_CNT; + int q_budget_used = 0; int qinx = 0; int received = 0, per_q_received = 0; unsigned long flags; @@ -4077,6 +4078,10 @@ int DWC_ETH_QOS_poll_mq(struct napi_struct *napi, int budget) received += per_q_received; pdata->xstats.rx_pkt_n += per_q_received; pdata->xstats.q_rx_pkt_n[qinx] += per_q_received; + + if (per_q_received > 0) + q_budget_used += per_q_budget; + #ifdef DWC_INET_LRO if (rx_queue->lro_flush_needed) lro_flush_all(&rx_queue->lro_mgr); @@ -4086,7 +4091,7 @@ int DWC_ETH_QOS_poll_mq(struct napi_struct *napi, int budget) /* If we processed all pkts, we are done; * tell the kernel & re-enable interrupt */ - if (received < budget) { + if ((received < q_budget_used) || (received == 0)) { if (pdata->dev->features & NETIF_F_GRO) { /* to turn off polling */ napi_complete(napi); @@ -4106,11 +4111,12 @@ int DWC_ETH_QOS_poll_mq(struct napi_struct *napi, int budget) DWC_ETH_QOS_enable_all_ch_rx_interrpt(pdata); spin_unlock_irqrestore(&pdata->lock, flags); } + return received; } DBGPR("<--DWC_ETH_QOS_poll_mq\n"); - return received; + return budget; } /*! 
@@ -5129,7 +5135,7 @@ static int ETH_PTPCLK_Config(struct DWC_ETH_QOS_prv_data *pdata, struct ETH_PPS_ pdata->ptpclk_freq = eth_pps_cfg->ptpclk_freq; ret = hw_if->config_default_addend(pdata, (ULONG)eth_pps_cfg->ptpclk_freq); - ret |= hw_if->config_sub_second_increment( (ULONG)eth_pps_cfg->ptpclk_freq); + ret |= hw_if->config_sub_second_increment(eth_pps_cfg->ptpclk_freq); return ret; } @@ -5728,11 +5734,11 @@ static int DWC_ETH_QOS_handle_prv_ioctl(struct DWC_ETH_QOS_prv_data *pdata, break; case DWC_ETH_QOS_DCB_ALGORITHM: - DWC_ETH_QOS_program_dcb_algorithm(pdata, req); + ret = DWC_ETH_QOS_program_dcb_algorithm(pdata, req); break; case DWC_ETH_QOS_AVB_ALGORITHM: - DWC_ETH_QOS_program_avb_algorithm(pdata, req); + ret = DWC_ETH_QOS_program_avb_algorithm(pdata, req); break; case DWC_ETH_QOS_RX_SPLIT_HDR_CMD: @@ -6805,19 +6811,22 @@ static void DWC_ETH_QOS_config_tx_pbl(struct DWC_ETH_QOS_prv_data *pdata, * \retval none */ -static void DWC_ETH_QOS_program_dcb_algorithm( +static int DWC_ETH_QOS_program_dcb_algorithm( struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data_struct *req) { struct DWC_ETH_QOS_dcb_algorithm l_dcb_struct, *u_dcb_struct = (struct DWC_ETH_QOS_dcb_algorithm *)req->ptr; struct hw_if_struct *hw_if = &pdata->hw_if; + int ret = 0; DBGPR("-->DWC_ETH_QOS_program_dcb_algorithm\n"); if (copy_from_user(&l_dcb_struct, u_dcb_struct, - sizeof(struct DWC_ETH_QOS_dcb_algorithm))) + sizeof(struct DWC_ETH_QOS_dcb_algorithm))) { dev_alert(&pdata->pdev->dev, "Failed to fetch DCB Struct info from user\n"); + return -EFAULT; + } hw_if->set_tx_queue_operating_mode(l_dcb_struct.qinx, (UINT)l_dcb_struct.op_mode); @@ -6825,6 +6834,7 @@ static void DWC_ETH_QOS_program_dcb_algorithm( hw_if->set_dcb_queue_weight(l_dcb_struct.qinx, l_dcb_struct.weight); DBGPR("<--DWC_ETH_QOS_program_dcb_algorithm\n"); + return ret; } /*! 
* \details This function configure @@ -6877,18 +6887,21 @@ void DWC_ETH_QOS_program_avb_algorithm_hw_register( * \retval none */ -static void DWC_ETH_QOS_program_avb_algorithm( +static int DWC_ETH_QOS_program_avb_algorithm( struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data_struct *req) { struct DWC_ETH_QOS_avb_algorithm l_avb_struct, *u_avb_struct = (struct DWC_ETH_QOS_avb_algorithm *)req->ptr; + int ret = 0; DBGPR("-->DWC_ETH_QOS_program_avb_algorithm\n"); if (copy_from_user(&l_avb_struct, u_avb_struct, - sizeof(struct DWC_ETH_QOS_avb_algorithm))) + sizeof(struct DWC_ETH_QOS_avb_algorithm))) { dev_alert(&pdata->pdev->dev, "Failed to fetch AVB Struct info from user\n"); + return -EFAULT; + } /*Application uses 1 for CLASS A traffic and 2 for CLASS B traffic @@ -6910,6 +6923,7 @@ static void DWC_ETH_QOS_program_avb_algorithm( /*Backup speed*/ pdata->avb_algorithm_speed_backup = pdata->speed; DBGPR("<--DWC_ETH_QOS_program_avb_algorithm\n"); + return ret; } /*! diff --git a/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.h b/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.h index c94d45c993e2..6c9463c50c9f 100644 --- a/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.h +++ b/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_drv.h @@ -74,10 +74,10 @@ static netdev_features_t DWC_ETH_QOS_fix_features( INT DWC_ETH_QOS_configure_remotewakeup(struct net_device *dev, struct ifr_data_struct *req); -static void DWC_ETH_QOS_program_dcb_algorithm( +static int DWC_ETH_QOS_program_dcb_algorithm( struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data_struct *req); -static void DWC_ETH_QOS_program_avb_algorithm( +static int DWC_ETH_QOS_program_avb_algorithm( struct DWC_ETH_QOS_prv_data *pdata, struct ifr_data_struct *req); static void DWC_ETH_QOS_config_tx_pbl(struct DWC_ETH_QOS_prv_data *pdata, diff --git a/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h b/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h index 69aa01bf4230..20c1ae62fd6a 100644 --- 
a/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h +++ b/techpack/data/drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2017-2019, The Linux Foundation. All rights +/* Copyright (c) 2017-2020, The Linux Foundation. All rights * reserved. * * This program is free software; you can redistribute it and/or modify @@ -1013,7 +1013,7 @@ struct hw_if_struct { /* for hw time stamping */ INT(*config_hw_time_stamping)(UINT); - INT(*config_sub_second_increment)(unsigned long ptp_clock); + INT(*config_sub_second_increment)(u64 ptp_clock); INT(*config_default_addend)(struct DWC_ETH_QOS_prv_data *pdata, unsigned long ptp_clock); INT(*init_systime)(UINT, UINT); INT(*config_addend)(UINT); diff --git a/techpack/data/drivers/rmnet/shs/rmnet_shs_config.h b/techpack/data/drivers/rmnet/shs/rmnet_shs_config.h index 10b8f58fa331..8d318c173bed 100644 --- a/techpack/data/drivers/rmnet/shs/rmnet_shs_config.h +++ b/techpack/data/drivers/rmnet/shs/rmnet_shs_config.h @@ -48,6 +48,7 @@ enum rmnet_shs_crit_err_e { RMNET_SHS_WQ_NL_SOCKET_ERR, RMNET_SHS_CPU_FLOWS_BNDS_ERR, RMNET_SHS_OUT_OF_MEM_ERR, + RMNET_SHS_UDP_SEGMENT, RMNET_SHS_CRIT_ERR_MAX }; diff --git a/techpack/data/drivers/rmnet/shs/rmnet_shs_main.c b/techpack/data/drivers/rmnet/shs/rmnet_shs_main.c index 7f9357660e0e..77569a1827e5 100755 --- a/techpack/data/drivers/rmnet/shs/rmnet_shs_main.c +++ b/techpack/data/drivers/rmnet/shs/rmnet_shs_main.c @@ -1012,6 +1012,8 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext) skb_bytes_delivered += skb->len; if (segs_per_skb > 0) { + if (node->skb_tport_proto == IPPROTO_UDP) + rmnet_shs_crit_err[RMNET_SHS_UDP_SEGMENT]++; rmnet_shs_deliver_skb_segmented(skb, ctext, segs_per_skb); } else { @@ -1511,7 +1513,7 @@ int rmnet_shs_drop_backlog(struct sk_buff_head *list, int cpu) return 0; } - +/* This will run in process context, avoid disabling bh */ static int rmnet_shs_oom_notify(struct notifier_block *self, unsigned long emtpy, void *free) { @@ 
-1520,7 +1522,6 @@ static int rmnet_shs_oom_notify(struct notifier_block *self, struct sk_buff_head *process_q; struct sk_buff_head *input_q; - local_bh_disable(); for_each_possible_cpu(cpu) { process_q = &GET_PQUEUE(cpu); @@ -1541,7 +1542,6 @@ static int rmnet_shs_oom_notify(struct notifier_block *self, (*nfree)++; } } - local_bh_enable(); return 0; } diff --git a/techpack/data/drivers/rmnet/shs/rmnet_shs_wq.c b/techpack/data/drivers/rmnet/shs/rmnet_shs_wq.c index fab0981861d9..96905722dd72 100644 --- a/techpack/data/drivers/rmnet/shs/rmnet_shs_wq.c +++ b/techpack/data/drivers/rmnet/shs/rmnet_shs_wq.c @@ -296,6 +296,7 @@ void rmnet_shs_wq_hstat_reset_node(struct rmnet_shs_wq_hstat_s *hnode) hnode->hash = 0; hnode->suggested_cpu = 0; hnode->current_cpu = 0; + hnode->segs_per_skb = 0; hnode->skb_tport_proto = 0; hnode->stat_idx = -1; INIT_LIST_HEAD(&hnode->cpu_node_id); @@ -409,7 +410,8 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p) node_p->hstats->skb_tport_proto = node_p->skb_tport_proto; node_p->hstats->current_cpu = node_p->map_cpu; node_p->hstats->suggested_cpu = node_p->map_cpu; - + /* Set segmentation off by default */ + node_p->hstats->segs_per_skb = 0; /* Start TCP flows with segmentation if userspace connected */ if (rmnet_shs_userspace_connected && node_p->hstats->skb_tport_proto == IPPROTO_TCP