Merge fb348857e7 ("io_uring: ensure IOPOLL locks around deferred work") into android13-5.15-lts
Steps on the way to 5.15.121

Resolves merge conflicts in:
	drivers/char/hw_random/virtio-rng.c

Change-Id: Ib59e8cd3275125cf9b54881536bc371c0e686acf
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -8,6 +8,10 @@
 
 #include <asm/dwarf.h>
 
+#define ASM_NL		 `	/* use '`' to mark new line in macro */
+#define __ALIGN		.align 4
+#define __ALIGN_STR	__stringify(__ALIGN)
+
 #ifdef __ASSEMBLY__
 
 .macro ST2 e, o, off
@@ -28,10 +32,6 @@
 #endif
 .endm
 
-#define ASM_NL		 `	/* use '`' to mark new line in macro */
-#define __ALIGN		.align 4
-#define __ALIGN_STR	__stringify(__ALIGN)
-
 /* annotation for data we want in DCCM - if enabled in .config */
 .macro ARCFP_DATA nm
 #ifdef CONFIG_ARC_HAS_DCCM
@@ -764,9 +764,9 @@ static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
 }
 
 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
-			     unsigned long end)
+			     unsigned long end, bool direct)
 {
-	unsigned long next;
+	unsigned long next, pages = 0;
 	pte_t *pte;
 
 	pte = pte_start + pte_index(addr);
@@ -788,13 +788,16 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 		}
 
 		pte_clear(&init_mm, addr, pte);
+		pages++;
 	}
+	if (direct)
+		update_page_count(mmu_virtual_psize, -pages);
 }
 
 static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
-				       unsigned long end)
+				       unsigned long end, bool direct)
 {
-	unsigned long next;
+	unsigned long next, pages = 0;
 	pte_t *pte_base;
 	pmd_t *pmd;
 
@@ -812,19 +815,22 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 				continue;
 			}
 			pte_clear(&init_mm, addr, (pte_t *)pmd);
+			pages++;
 			continue;
 		}
 
 		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
-		remove_pte_table(pte_base, addr, next);
+		remove_pte_table(pte_base, addr, next, direct);
 		free_pte_table(pte_base, pmd);
 	}
+	if (direct)
+		update_page_count(MMU_PAGE_2M, -pages);
 }
 
 static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
-				       unsigned long end)
+				       unsigned long end, bool direct)
 {
-	unsigned long next;
+	unsigned long next, pages = 0;
 	pmd_t *pmd_base;
 	pud_t *pud;
 
@@ -842,16 +848,20 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
 				continue;
 			}
 			pte_clear(&init_mm, addr, (pte_t *)pud);
+			pages++;
 			continue;
 		}
 
 		pmd_base = pud_pgtable(*pud);
-		remove_pmd_table(pmd_base, addr, next);
+		remove_pmd_table(pmd_base, addr, next, direct);
 		free_pmd_table(pmd_base, pud);
 	}
+	if (direct)
+		update_page_count(MMU_PAGE_1G, -pages);
 }
 
-static void __meminit remove_pagetable(unsigned long start, unsigned long end)
+static void __meminit remove_pagetable(unsigned long start, unsigned long end,
+				       bool direct)
 {
 	unsigned long addr, next;
 	pud_t *pud_base;
@@ -880,7 +890,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
 		}
 
 		pud_base = p4d_pgtable(*p4d);
-		remove_pud_table(pud_base, addr, next);
+		remove_pud_table(pud_base, addr, next, direct);
 		free_pud_table(pud_base, p4d);
 	}
 
@@ -903,7 +913,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
 
 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
 {
-	remove_pagetable(start, end);
+	remove_pagetable(start, end, true);
 	return 0;
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -939,7 +949,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 #ifdef CONFIG_MEMORY_HOTPLUG
 void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
 {
-	remove_pagetable(start, start + page_size);
+	remove_pagetable(start, start + page_size, false);
 }
 #endif
 #endif
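The powerpc hunks above thread a `direct` flag through the radix page-table teardown so the DirectMap page counters are only decremented when the linear (direct) mapping is removed, not when vmemmap mappings are torn down. A minimal userspace model of that accounting pattern — not the kernel code itself; `update_page_count` and the counter are stand-ins:

```c
#include <stdbool.h>
#include <stdio.h>

static long direct_map_pages = 512;	/* stand-in for a DirectMap counter */

static void update_page_count(long delta)
{
	direct_map_pages += delta;
}

/* model of remove_pte_table(): count clears locally, publish once at the
 * end of the walk, and only when tearing down the direct mapping */
static void remove_range(int npages, bool direct)
{
	long pages = 0;

	for (int i = 0; i < npages; i++)
		pages++;		/* pte_clear() would happen here */

	if (direct)
		update_page_count(-pages);
}

int main(void)
{
	remove_range(128, true);	/* memory hot-unplug: counted */
	remove_range(64, false);	/* vmemmap teardown: not counted */
	printf("DirectMap pages: %ld\n", direct_map_pages);	/* 384 */
	return 0;
}
```
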
@@ -188,7 +188,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
 	unsigned long nr_pfn = page_size / sizeof(struct page);
 	unsigned long start_pfn = page_to_pfn((struct page *)start);
 
-	if ((start_pfn + nr_pfn) > altmap->end_pfn)
+	if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)
 		return true;
 
 	if (start_pfn < altmap->base_pfn)
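The `- 1` fixes an off-by-one: the last pfn a range touches is `start_pfn + nr_pfn - 1`, so a range that ends exactly on the altmap boundary should not be reported as crossing it. A small self-check (assuming, as the fix implies, that `end_pfn` names the last pfn covered by the altmap):

```c
#include <assert.h>
#include <stdbool.h>

struct altmap { unsigned long base_pfn, end_pfn; };

static bool cross_boundary(const struct altmap *a, unsigned long start_pfn,
			   unsigned long nr_pfn)
{
	/* old check: (start_pfn + nr_pfn) > end_pfn — wrongly flagged
	 * ranges whose last pfn lands exactly on end_pfn */
	if ((start_pfn + nr_pfn - 1) > a->end_pfn)
		return true;
	if (start_pfn < a->base_pfn)
		return true;
	return false;
}

int main(void)
{
	struct altmap a = { .base_pfn = 0, .end_pfn = 99 };

	assert(!cross_boundary(&a, 90, 10));	/* pfns 90..99 fit exactly */
	assert(cross_boundary(&a, 91, 10));	/* pfns 91..100 cross */
	return 0;
}
```
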
@@ -67,6 +67,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	struct uprobe_task *utask = current->utask;
 
 	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
+	current->thread.bad_cause = utask->autask.saved_cause;
 
 	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
 
@@ -102,6 +103,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
 {
 	struct uprobe_task *utask = current->utask;
 
+	current->thread.bad_cause = utask->autask.saved_cause;
 	/*
	 * Task has received a fatal signal, so reset back to probbed
	 * address.
@@ -42,7 +42,6 @@
 
 struct st_rng_data {
 	void __iomem *base;
-	struct clk *clk;
 	struct hwrng ops;
 };
 
@@ -85,26 +84,18 @@ static int st_rng_probe(struct platform_device *pdev)
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
-	clk = devm_clk_get(&pdev->dev, NULL);
+	clk = devm_clk_get_enabled(&pdev->dev, NULL);
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
-	ret = clk_prepare_enable(clk);
-	if (ret)
-		return ret;
-
 	ddata->ops.priv	= (unsigned long)ddata;
 	ddata->ops.read	= st_rng_read;
 	ddata->ops.name	= pdev->name;
 	ddata->base	= base;
-	ddata->clk	= clk;
 
 	dev_set_drvdata(&pdev->dev, ddata);
 
 	ret = devm_hwrng_register(&pdev->dev, &ddata->ops);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register HW RNG\n");
-		clk_disable_unprepare(clk);
 		return ret;
 	}
 
@@ -113,15 +104,6 @@ static int st_rng_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int st_rng_remove(struct platform_device *pdev)
-{
-	struct st_rng_data *ddata = dev_get_drvdata(&pdev->dev);
-
-	clk_disable_unprepare(ddata->clk);
-
-	return 0;
-}
-
 static const struct of_device_id st_rng_match[] __maybe_unused = {
 	{ .compatible = "st,rng" },
 	{},
@@ -134,7 +116,6 @@ static struct platform_driver st_rng_driver = {
 		.of_match_table = of_match_ptr(st_rng_match),
 	},
 	.probe = st_rng_probe,
-	.remove = st_rng_remove
 };
 
 module_platform_driver(st_rng_driver);
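The st-rng conversion works because `devm_clk_get_enabled()` bundles get+prepare+enable and registers the matching disable/unprepare/put as device-managed actions, which is what lets both the error path and the whole `.remove` callback disappear. A minimal sketch of the idiom (the driver name and callback are illustrative, not from the patch):

```c
#include <linux/clk.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* acquired and enabled in one call; devres disables and releases
	 * the clock automatically on probe failure or driver unbind */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* ... register the device; no clock cleanup code needed ... */
	return 0;
}
```
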
@@ -4,6 +4,7 @@
  * Copyright (C) 2007, 2008 Rusty Russell IBM Corporation
  */
 
+#include <asm/barrier.h>
 #include <linux/err.h>
 #include <linux/hw_random.h>
 #include <linux/scatterlist.h>
@@ -20,12 +21,12 @@ struct virtrng_info {
 	struct virtqueue *vq;
 	char name[25];
 	int index;
-	bool busy;
 	bool hwrng_register_done;
 	bool hwrng_removed;
 	/* data transfer */
 	struct completion have_data;
 	unsigned int data_avail;
+	unsigned int data_idx;
 	/* minimal size returned by rng_buffer_size() */
 #if SMP_CACHE_BYTES < 32
 	u8 data[32];
@@ -37,19 +38,23 @@ struct virtrng_info {
 static void random_recv_done(struct virtqueue *vq)
 {
 	struct virtrng_info *vi = vq->vdev->priv;
+	unsigned int len;
 
 	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
-	if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
+	if (!virtqueue_get_buf(vi->vq, &len))
 		return;
 
+	smp_store_release(&vi->data_avail, len);
 	complete(&vi->have_data);
 }
 
 /* The host will fill any buffer we give it with sweet, sweet randomness. */
-static void register_buffer(struct virtrng_info *vi)
+static void request_entropy(struct virtrng_info *vi)
 {
 	struct scatterlist sg;
 
+	reinit_completion(&vi->have_data);
+	vi->data_idx = 0;
+
 	sg_init_one(&sg, vi->data, sizeof(vi->data));
 
 	/* There should always be room for one buffer. */
@@ -58,6 +63,18 @@ static void register_buffer(struct virtrng_info *vi)
 	virtqueue_kick(vi->vq);
 }
 
+static unsigned int copy_data(struct virtrng_info *vi, void *buf,
+			      unsigned int size)
+{
+	size = min_t(unsigned int, size, vi->data_avail);
+	memcpy(buf, vi->data + vi->data_idx, size);
+	vi->data_idx += size;
+	vi->data_avail -= size;
+	if (vi->data_avail == 0)
+		request_entropy(vi);
+	return size;
+}
+
 static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
 {
 	int ret;
@@ -68,35 +85,37 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
 	if (vi->hwrng_removed)
 		return -ENODEV;
 
-	if (!vi->busy) {
-		vi->busy = true;
-		reinit_completion(&vi->have_data);
-		register_buffer(vi);
+	read = 0;
+
+	/* copy available data */
+	if (smp_load_acquire(&vi->data_avail)) {
+		chunk = copy_data(vi, buf, size);
+		size -= chunk;
+		read += chunk;
 	}
 
 	if (!wait)
-		return 0;
+		return read;
 
-	read = 0;
+	/* We have already copied available entropy,
+	 * so either size is 0 or data_avail is 0
+	 */
 	while (size != 0) {
+		/* data_avail is 0 but a request is pending */
 		ret = wait_for_completion_killable(&vi->have_data);
 		if (ret < 0)
 			return ret;
+		/* if vi->data_avail is 0, we have been interrupted
		 * by a cleanup, but buffer stays in the queue
		 */
+		if (vi->data_avail == 0)
+			return read;
 
-		chunk = min_t(unsigned int, size, vi->data_avail);
-		memcpy(buf + read, vi->data, chunk);
-		read += chunk;
+		chunk = copy_data(vi, buf + read, size);
 		size -= chunk;
-		vi->data_avail = 0;
-
-		if (size != 0) {
-			reinit_completion(&vi->have_data);
-			register_buffer(vi);
-		}
+		read += chunk;
 	}
 
-	vi->busy = false;
-
 	return read;
 }
 
@@ -104,8 +123,7 @@ static void virtio_cleanup(struct hwrng *rng)
 {
 	struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
 
-	if (vi->busy)
-		wait_for_completion(&vi->have_data);
+	complete(&vi->have_data);
 }
 
 static int probe_common(struct virtio_device *vdev)
@@ -141,6 +159,9 @@ static int probe_common(struct virtio_device *vdev)
 		goto err_find;
 	}
 
+	/* we always have a pending entropy request */
+	request_entropy(vi);
+
 	return 0;
 
 err_find:
@@ -156,9 +177,9 @@ static void remove_common(struct virtio_device *vdev)
 
 	vi->hwrng_removed = true;
 	vi->data_avail = 0;
+	vi->data_idx = 0;
 	complete(&vi->have_data);
 	vdev->config->reset(vdev);
-	vi->busy = false;
 	if (vi->hwrng_register_done)
 		hwrng_unregister(&vi->hwrng);
 	vdev->config->del_vqs(vdev);
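The key ordering guarantee in the virtio-rng rework: `random_recv_done()` publishes the buffer length with `smp_store_release()` only after the device has filled `vi->data`, and `virtio_read()` observes it with `smp_load_acquire()`, so a reader that sees a non-zero `data_avail` is guaranteed to also see the matching buffer contents. A userspace model of that handshake using C11 atomics (names are illustrative, not the driver's):

```c
#include <stdatomic.h>
#include <string.h>

static unsigned char data[32];
static _Atomic unsigned int data_avail;

/* producer side: fill the buffer, then publish the length */
void publish(const unsigned char *src, unsigned int len)
{
	memcpy(data, src, len);
	atomic_store_explicit(&data_avail, len, memory_order_release);
}

/* consumer side: a non-zero acquire-load orders the memcpy after the fill */
unsigned int try_read(unsigned char *dst, unsigned int want)
{
	unsigned int avail = atomic_load_explicit(&data_avail,
						  memory_order_acquire);
	if (!avail)
		return 0;
	if (want > avail)
		want = avail;
	memcpy(dst, data, want);
	return want;
}
```
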
@@ -297,7 +297,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
 static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
 				   const u8 *key, unsigned int len)
 {
-	struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+	struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
 	int err;
 
 	err = verify_skcipher_des3_key(cipher, key);
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
 nx-crypto-objs := nx.o \
-		  nx_debugfs.o \
 		  nx-aes-cbc.o \
 		  nx-aes-ecb.o \
 		  nx-aes-gcm.o \
@@ -11,6 +10,7 @@ nx-crypto-objs := nx.o \
 		  nx-sha256.o \
 		  nx-sha512.o
 
+nx-crypto-$(CONFIG_DEBUG_FS) += nx_debugfs.o
 obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
 obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
 nx-compress-objs := nx-842.o
@@ -170,8 +170,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
 void nx_debugfs_init(struct nx_crypto_driver *);
 void nx_debugfs_fini(struct nx_crypto_driver *);
 #else
-#define NX_DEBUGFS_INIT(drv)	(0)
-#define NX_DEBUGFS_FINI(drv)	(0)
+#define NX_DEBUGFS_INIT(drv)	do {} while (0)
+#define NX_DEBUGFS_FINI(drv)	do {} while (0)
 #endif
 
 #define NX_PAGE_NUM(x)		((u64)(x) & 0xfffffffffffff000ULL)
@@ -49,11 +49,6 @@ struct service_hndl {
 	struct list_head list;
 };
 
-static inline int get_current_node(void)
-{
-	return topology_physical_package_id(raw_smp_processor_id());
-}
-
 int adf_service_register(struct service_hndl *service);
 int adf_service_unregister(struct service_hndl *service);
@@ -605,7 +605,7 @@ static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
 {
 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct qat_crypto_instance *inst = NULL;
-	int node = get_current_node();
+	int node = numa_node_id();
 	struct device *dev;
 	int ret;
 
@@ -706,7 +706,8 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 			       struct scatterlist *sgl,
 			       struct scatterlist *sglout,
-			       struct qat_crypto_request *qat_req)
+			       struct qat_crypto_request *qat_req,
+			       gfp_t flags)
 {
 	struct device *dev = &GET_DEV(inst->accel_dev);
 	int i, sg_nctr = 0;
@@ -727,7 +728,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	qat_req->buf.sgl_dst_valid = false;
 
 	if (n > QAT_MAX_BUFF_DESC) {
-		bufl = kzalloc_node(sz, GFP_ATOMIC, node);
+		bufl = kzalloc_node(sz, flags, node);
 		if (unlikely(!bufl))
 			return -ENOMEM;
 	} else {
@@ -771,7 +772,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	sg_nctr = 0;
 
 	if (n > QAT_MAX_BUFF_DESC) {
-		buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
+		buflout = kzalloc_node(sz_out, flags, node);
 		if (unlikely(!buflout))
 			goto err_in;
 	} else {
@@ -972,6 +973,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	struct icp_qat_fw_la_bulk_req *msg;
 	int digst_size = crypto_aead_authsize(aead_tfm);
+	gfp_t f = qat_algs_alloc_flags(&areq->base);
 	int ret;
 	u32 cipher_len;
 
@@ -979,7 +981,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	if (cipher_len % AES_BLOCK_SIZE != 0)
 		return -EINVAL;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;
 
@@ -1014,6 +1016,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
+	gfp_t f = qat_algs_alloc_flags(&areq->base);
 	struct icp_qat_fw_la_bulk_req *msg;
 	u8 *iv = areq->iv;
 	int ret;
@@ -1021,7 +1024,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
 	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
 		return -EINVAL;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;
 
@@ -1068,7 +1071,7 @@ static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
 {
 	struct qat_crypto_instance *inst = NULL;
 	struct device *dev;
-	int node = get_current_node();
+	int node = numa_node_id();
 	int ret;
 
 	inst = qat_crypto_get_instance_node(node);
@@ -1199,13 +1202,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
 	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	gfp_t f = qat_algs_alloc_flags(&req->base);
 	struct icp_qat_fw_la_bulk_req *msg;
 	int ret;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;
 
@@ -1264,13 +1268,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
 	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	gfp_t f = qat_algs_alloc_flags(&req->base);
 	struct icp_qat_fw_la_bulk_req *msg;
 	int ret;
 
 	if (req->cryptlen == 0)
 		return 0;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f);
 	if (unlikely(ret))
 		return ret;
@@ -170,15 +170,14 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
 	}
 
 	areq->dst_len = req->ctx.dh->p_size;
+	dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+			 DMA_FROM_DEVICE);
 	if (req->dst_align) {
 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
 					 areq->dst_len, 1);
 		kfree_sensitive(req->dst_align);
 	}
 
-	dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
-			 DMA_FROM_DEVICE);
-
 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
 			 DMA_TO_DEVICE);
 	dma_unmap_single(dev, req->phy_out,
@@ -224,9 +223,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(kpp_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
-	int ret;
+	gfp_t flags = qat_algs_alloc_flags(&req->base);
 	int n_input_params = 0;
 	u8 *vaddr;
+	int ret;
 
 	if (unlikely(!ctx->xa))
 		return -EINVAL;
@@ -291,7 +291,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
 	} else {
 		int shift = ctx->p_size - req->src_len;
 
-		qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->p_size, flags);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
@@ -317,7 +317,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
 		qat_req->dst_align = NULL;
 		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->p_size, flags);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
 
@@ -331,13 +331,13 @@ static int qat_dh_compute_value(struct kpp_request *req)
 	qat_req->in.dh.in_tab[n_input_params] = 0;
 	qat_req->out.dh.out_tab[1] = 0;
 	/* Mapping in.in.b or in.in_g2.xa is the same */
-	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
 					 sizeof(struct qat_dh_input_params),
 					 DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
 		goto unmap_dst;
 
-	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
 					  sizeof(struct qat_dh_output_params),
 					  DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -488,11 +488,13 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm)
 {
 	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
 	struct qat_crypto_instance *inst =
-			qat_crypto_get_instance_node(get_current_node());
+			qat_crypto_get_instance_node(numa_node_id());
 
 	if (!inst)
 		return -EINVAL;
 
+	kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
 	ctx->p_size = 0;
 	ctx->g2 = false;
 	ctx->inst = inst;
@@ -518,12 +520,14 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 
 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
 
-	kfree_sensitive(req->src_align);
-
 	dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
 			 DMA_TO_DEVICE);
 
+	kfree_sensitive(req->src_align);
+
 	areq->dst_len = req->ctx.rsa->key_sz;
+	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+			 DMA_FROM_DEVICE);
 	if (req->dst_align) {
 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
 					 areq->dst_len, 1);
@@ -531,9 +535,6 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 		kfree_sensitive(req->dst_align);
 	}
 
-	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
-			 DMA_FROM_DEVICE);
-
 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 	dma_unmap_single(dev, req->phy_out,
@@ -650,6 +651,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	gfp_t flags = qat_algs_alloc_flags(&req->base);
 	u8 *vaddr;
 	int ret;
 
@@ -696,7 +698,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, flags);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
@@ -714,7 +716,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 		qat_req->dst_align = NULL;
 		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, flags);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
 		vaddr = qat_req->dst_align;
@@ -727,13 +729,13 @@ static int qat_rsa_enc(struct akcipher_request *req)
 
 	qat_req->in.rsa.in_tab[3] = 0;
 	qat_req->out.rsa.out_tab[1] = 0;
-	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
 					 sizeof(struct qat_rsa_input_params),
 					 DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
 		goto unmap_dst;
 
-	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
 					  sizeof(struct qat_rsa_output_params),
 					  DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -783,6 +785,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	gfp_t flags = qat_algs_alloc_flags(&req->base);
 	u8 *vaddr;
 	int ret;
 
@@ -839,7 +842,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, flags);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
@@ -857,7 +860,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 		qat_req->dst_align = NULL;
 		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, flags);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
 		vaddr = qat_req->dst_align;
@@ -872,13 +875,13 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	else
 		qat_req->in.rsa.in_tab[3] = 0;
 	qat_req->out.rsa.out_tab[1] = 0;
-	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
 					 sizeof(struct qat_rsa_input_params),
 					 DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
 		goto unmap_dst;
 
-	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
 					  sizeof(struct qat_rsa_output_params),
 					  DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -1222,11 +1225,13 @@ static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
 {
 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct qat_crypto_instance *inst =
-			qat_crypto_get_instance_node(get_current_node());
+			qat_crypto_get_instance_node(numa_node_id());
 
 	if (!inst)
 		return -EINVAL;
 
+	akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);
+
 	ctx->key_sz = 0;
 	ctx->inst = inst;
 	return 0;
@@ -1249,7 +1254,6 @@ static struct akcipher_alg rsa = {
 	.max_size = qat_rsa_max_size,
 	.init = qat_rsa_init_tfm,
 	.exit = qat_rsa_exit_tfm,
-	.reqsize = sizeof(struct qat_asym_request) + 64,
 	.base = {
 		.cra_name = "rsa",
 		.cra_driver_name = "qat-rsa",
@@ -1266,7 +1270,6 @@ static struct kpp_alg dh = {
 	.max_size = qat_dh_max_size,
 	.init = qat_dh_init_tfm,
 	.exit = qat_dh_exit_tfm,
-	.reqsize = sizeof(struct qat_asym_request) + 64,
 	.base = {
 		.cra_name = "dh",
 		.cra_driver_name = "qat-dh",
@@ -109,4 +109,9 @@ static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)
 	return true;
 }
 
+static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
+{
+	return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
 #endif
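`qat_algs_alloc_flags()` is what lets the qat hunks above replace the unconditional `GFP_ATOMIC` allocations: the GFP mode now follows the crypto request's own `CRYPTO_TFM_REQ_MAY_SLEEP` flag. A sketch of a caller (the helper is hypothetical, not from the patch):

```c
#include <linux/crypto.h>
#include <linux/slab.h>

/* hypothetical: pick the allocation context from the request itself */
static void *alloc_req_buffer(struct crypto_async_request *base, size_t sz)
{
	gfp_t flags = (base->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;

	return kzalloc(sz, flags);
}
```
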
@@ -402,18 +402,34 @@ static void unregister_dev_dax(void *dev)
 	put_device(dev);
 }
 
+static void dax_region_free(struct kref *kref)
+{
+	struct dax_region *dax_region;
+
+	dax_region = container_of(kref, struct dax_region, kref);
+	kfree(dax_region);
+}
+
+void dax_region_put(struct dax_region *dax_region)
+{
+	kref_put(&dax_region->kref, dax_region_free);
+}
+EXPORT_SYMBOL_GPL(dax_region_put);
+
 /* a return value >= 0 indicates this invocation invalidated the id */
 static int __free_dev_dax_id(struct dev_dax *dev_dax)
 {
-	struct dax_region *dax_region = dev_dax->region;
 	struct device *dev = &dev_dax->dev;
+	struct dax_region *dax_region;
 	int rc = dev_dax->id;
 
 	device_lock_assert(dev);
 
-	if (is_static(dax_region) || dev_dax->id < 0)
+	if (!dev_dax->dyn_id || dev_dax->id < 0)
 		return -1;
+	dax_region = dev_dax->region;
 	ida_free(&dax_region->ida, dev_dax->id);
+	dax_region_put(dax_region);
 	dev_dax->id = -1;
 	return rc;
 }
@@ -429,6 +445,20 @@ static int free_dev_dax_id(struct dev_dax *dev_dax)
 	return rc;
 }
 
+static int alloc_dev_dax_id(struct dev_dax *dev_dax)
+{
+	struct dax_region *dax_region = dev_dax->region;
+	int id;
+
+	id = ida_alloc(&dax_region->ida, GFP_KERNEL);
+	if (id < 0)
+		return id;
+	kref_get(&dax_region->kref);
+	dev_dax->dyn_id = true;
+	dev_dax->id = id;
+	return id;
+}
+
 static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t len)
 {
@@ -516,20 +546,6 @@ static const struct attribute_group *dax_region_attribute_groups[] = {
 	NULL,
 };
 
-static void dax_region_free(struct kref *kref)
-{
-	struct dax_region *dax_region;
-
-	dax_region = container_of(kref, struct dax_region, kref);
-	kfree(dax_region);
-}
-
-void dax_region_put(struct dax_region *dax_region)
-{
-	kref_put(&dax_region->kref, dax_region_free);
-}
-EXPORT_SYMBOL_GPL(dax_region_put);
-
 static void dax_region_unregister(void *region)
 {
 	struct dax_region *dax_region = region;
@@ -591,10 +607,12 @@ EXPORT_SYMBOL_GPL(alloc_dax_region);
 static void dax_mapping_release(struct device *dev)
 {
 	struct dax_mapping *mapping = to_dax_mapping(dev);
-	struct dev_dax *dev_dax = to_dev_dax(dev->parent);
+	struct device *parent = dev->parent;
+	struct dev_dax *dev_dax = to_dev_dax(parent);
 
 	ida_free(&dev_dax->ida, mapping->id);
 	kfree(mapping);
+	put_device(parent);
 }
 
 static void unregister_dax_mapping(void *data)
@@ -734,6 +752,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
 	dev = &mapping->dev;
 	device_initialize(dev);
 	dev->parent = &dev_dax->dev;
+	get_device(dev->parent);
 	dev->type = &dax_mapping_type;
 	dev_set_name(dev, "mapping%d", mapping->id);
 	rc = device_add(dev);
@@ -1251,12 +1270,10 @@ static const struct attribute_group *dax_attribute_groups[] = {
 static void dev_dax_release(struct device *dev)
 {
 	struct dev_dax *dev_dax = to_dev_dax(dev);
-	struct dax_region *dax_region = dev_dax->region;
 	struct dax_device *dax_dev = dev_dax->dax_dev;
 
 	put_dax(dax_dev);
 	free_dev_dax_id(dev_dax);
-	dax_region_put(dax_region);
 	kfree(dev_dax->pgmap);
 	kfree(dev_dax);
 }
@@ -1280,6 +1297,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
 	if (!dev_dax)
 		return ERR_PTR(-ENOMEM);
 
+	dev_dax->region = dax_region;
 	if (is_static(dax_region)) {
 		if (dev_WARN_ONCE(parent, data->id < 0,
 				"dynamic id specified to static region\n")) {
@@ -1295,13 +1313,11 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
 			goto err_id;
 		}
 
-		rc = ida_alloc(&dax_region->ida, GFP_KERNEL);
+		rc = alloc_dev_dax_id(dev_dax);
 		if (rc < 0)
 			goto err_id;
-		dev_dax->id = rc;
 	}
 
-	dev_dax->region = dax_region;
 	dev = &dev_dax->dev;
 	device_initialize(dev);
 	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
@@ -1339,7 +1355,6 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
 	dev_dax->target_node = dax_region->target_node;
 	dev_dax->align = dax_region->align;
 	ida_init(&dev_dax->ida);
-	kref_get(&dax_region->kref);
 
 	inode = dax_inode(dax_dev);
 	dev->devt = inode->i_rdev;
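The lifetime rule the dax hunks establish: every dynamically allocated device id pins its region (`kref_get()` in `alloc_dev_dax_id()`), and releasing the id is what drops the pin (`kref_put()` via `dax_region_put()` in `__free_dev_dax_id()`), so the region can no longer be freed while a device still holds an id. A minimal model of that pairing (demo names, not the kernel's API surface):

```c
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_region {
	struct kref kref;
};

static void demo_region_free(struct kref *kref)
{
	kfree(container_of(kref, struct demo_region, kref));
}

/* allocating an id takes a region reference ... */
static void demo_id_alloc(struct demo_region *r)
{
	kref_get(&r->kref);
}

/* ... and only freeing the id gives it back */
static void demo_id_free(struct demo_region *r)
{
	kref_put(&r->kref, demo_region_free);
}
```
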
@@ -52,7 +52,8 @@ struct dax_mapping {
  * @region - parent region
  * @dax_dev - core dax functionality
  * @target_node: effective numa node if dev_dax memory range is onlined
- * @id: ida allocated id
+ * @dyn_id: is this a dynamic or statically created instance
+ * @id: ida allocated id when the dax_region is not static
  * @ida: mapping id allocator
  * @dev - device core
  * @pgmap - pgmap for memmap setup / lifetime (driver owned)
@@ -64,6 +65,7 @@ struct dev_dax {
 	struct dax_device *dax_dev;
 	unsigned int align;
 	int target_node;
+	bool dyn_id;
 	int id;
 	struct ida ida;
 	struct device dev;
@@ -88,7 +88,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
 	if (!data->res_name)
 		goto err_res_name;
 
-	rc = memory_group_register_static(numa_node, total_len);
+	rc = memory_group_register_static(numa_node, PFN_UP(total_len));
 	if (rc < 0)
 		goto err_reg_mgid;
 	data->mgid = rc;
@@ -439,8 +439,10 @@ efi_status_t efi_exit_boot_services(void *handle,
 {
 	efi_status_t status;
 
-	status = efi_get_memory_map(map);
+	if (efi_disable_pci_dma)
+		efi_pci_disable_bridge_busmaster();
 
+	status = efi_get_memory_map(map);
 	if (status != EFI_SUCCESS)
 		goto fail;
 
@@ -448,9 +450,6 @@ efi_status_t efi_exit_boot_services(void *handle,
 	if (status != EFI_SUCCESS)
 		goto free_map;
 
-	if (efi_disable_pci_dma)
-		efi_pci_disable_bridge_busmaster();
-
 	status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
 
 	if (status == EFI_INVALID_PARAMETER) {
@@ -4924,20 +4924,19 @@ oplock_break_ack:
 
 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
 	/*
-	 * releasing stale oplock after recent reconnect of smb session using
-	 * a now incorrect file handle is not a data integrity issue but do
-	 * not bother sending an oplock release if session to server still is
-	 * disconnected since oplock already released by the server
+	 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
+	 * an acknowledgment to be sent when the file has already been closed.
+	 * check for server null, since can race with kill_sb calling tree disconnect.
 	 */
-	if (!oplock_break_cancelled) {
-		/* check for server null since can race with kill_sb calling tree disconnect */
-		if (tcon->ses && tcon->ses->server) {
-			rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
-				volatile_fid, net_fid, cinode);
-			cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
-		} else
-			pr_warn_once("lease break not sent for unmounted share\n");
-	}
+	spin_lock(&cinode->open_file_lock);
+	if (tcon->ses && tcon->ses->server && !oplock_break_cancelled &&
+	    !list_empty(&cinode->openFileList)) {
+		spin_unlock(&cinode->open_file_lock);
+		rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+						volatile_fid, net_fid, cinode);
+		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+	} else
+		spin_unlock(&cinode->open_file_lock);
 
 	cifs_done_oplock_break(cinode);
 }
@@ -423,7 +423,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
 	out[baselen + 3] = PERIOD;
 
 	if (dot_present)
-		memcpy(&out[baselen + 4], extension, 4);
+		memcpy(out + baselen + 4, extension, 4);
 	else
 		out[baselen + 4] = '\0';
 	smbConvertToUTF16((__le16 *)shortname, out, PATH_MAX,
@@ -924,6 +924,7 @@ out:
 out_noaction:
 	return ret;
 session_recover:
+	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
 	nfs4_schedule_session_recovery(session, status);
 	dprintk("%s ERROR: %d Reset session\n", __func__, status);
 	nfs41_sequence_free_slot(res);
@@ -52,7 +52,7 @@ bool acpi_dock_match(acpi_handle handle);
 bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
 union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
 			u64 rev, u64 func, union acpi_object *argv4);
-
+#ifdef CONFIG_ACPI
 static inline union acpi_object *
 acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
 			u64 func, union acpi_object *argv4,
@@ -68,6 +68,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
 
 	return obj;
 }
+#endif
 
 #define ACPI_INIT_DSM_ARGV4(cnt, eles)					\
 	{								\
@@ -18,6 +18,12 @@ static inline void *kpp_request_ctx(struct kpp_request *req)
 	return req->__ctx;
 }
 
+static inline void kpp_set_reqsize(struct crypto_kpp *kpp,
+				   unsigned int reqsize)
+{
+	crypto_kpp_alg(kpp)->reqsize = reqsize;
+}
+
 static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
 {
 	return tfm->base.__crt_ctx;
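`kpp_set_reqsize()` lets a kpp driver declare its per-request context size at tfm-init time instead of through a static `.reqsize` field — which is exactly how the qat hunks above drop `.reqsize` from `struct kpp_alg dh` and call the helper from `qat_dh_init_tfm()`. A sketch (the context struct is hypothetical):

```c
#include <crypto/internal/kpp.h>

struct demo_req_ctx {
	u8 scratch[128];	/* hypothetical per-request state */
};

static int demo_kpp_init_tfm(struct crypto_kpp *tfm)
{
	/* reserve room for the context plus alignment slack */
	kpp_set_reqsize(tfm, sizeof(struct demo_req_ctx) + 64);
	return 0;
}
```
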
@@ -3,6 +3,7 @@
 #define __LINUX_BOOTMEM_INFO_H
 
 #include <linux/mm.h>
+#include <linux/kmemleak.h>
 
 /*
  * Types for free bootmem stored in page->lru.next. These have to be in
@@ -59,6 +60,7 @@ static inline void get_page_bootmem(unsigned long info, struct page *page,
 
 static inline void free_bootmem_page(struct page *page)
 {
+	kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
 	free_reserved_page(page);
 }
 #endif
@@ -1524,6 +1524,8 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
 
 static void io_queue_deferred(struct io_ring_ctx *ctx)
 {
+	lockdep_assert_held(&ctx->completion_lock);
+
 	while (!list_empty(&ctx->defer_list)) {
 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
 						struct io_defer_entry, list);
@@ -1575,14 +1577,24 @@ static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 		io_queue_deferred(ctx);
 }
 
-static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+static inline bool io_commit_needs_flush(struct io_ring_ctx *ctx)
+{
+	return ctx->off_timeout_used || ctx->drain_active;
+}
+
+static inline void __io_commit_cqring(struct io_ring_ctx *ctx)
 {
-	if (unlikely(ctx->off_timeout_used || ctx->drain_active))
-		__io_commit_cqring_flush(ctx);
 	/* order cqe stores with ring update */
 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
 
+static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+{
+	if (unlikely(io_commit_needs_flush(ctx)))
+		__io_commit_cqring_flush(ctx);
+	__io_commit_cqring(ctx);
+}
+
 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
 	struct io_rings *r = ctx->rings;
@@ -2521,7 +2533,12 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
 
-	io_commit_cqring(ctx);
+	if (io_commit_needs_flush(ctx)) {
+		spin_lock(&ctx->completion_lock);
+		__io_commit_cqring_flush(ctx);
+		spin_unlock(&ctx->completion_lock);
+	}
+	__io_commit_cqring(ctx);
 	io_cqring_ev_posted_iopoll(ctx);
 	io_req_free_batch_finish(ctx, &rb);
 }
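The io_uring fix splits `io_commit_cqring()` so the IOPOLL completion path — which runs without `completion_lock` — can take the lock just for the deferred-work flush, while `io_queue_deferred()` now documents the contract with `lockdep_assert_held()`. The shape of that pattern, reduced to its essentials (demo names, not the io_uring code):

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_completion_lock);
static bool demo_needs_flush;

/* slow path: caller must hold the lock, and lockdep enforces it */
static void demo_flush_deferred(void)
{
	lockdep_assert_held(&demo_completion_lock);
	/* drain the deferred list here */
}

/* lock-free fast path: take the lock only when the slow path is needed */
static void demo_commit(void)
{
	if (unlikely(demo_needs_flush)) {
		spin_lock(&demo_completion_lock);
		demo_flush_deferred();
		spin_unlock(&demo_completion_lock);
	}
	/* publish the ring tail here */
}
```
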
@@ -25,7 +25,7 @@ modname = $(notdir $(@:.mod.o=))
 part-of-module = y
 
 quiet_cmd_cc_o_c = CC [M]  $@
-      cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI), $(c_flags)) -c -o $@ $<
+      cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV), $(c_flags)) -c -o $@ $<
 
 %.mod.o: %.mod.c FORCE
 	$(call if_changed_dep,cc_o_c)
@@ -1303,6 +1303,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
 	if (relsym->st_name != 0)
 		return relsym;
 
+	/*
+	 * Strive to find a better symbol name, but the resulting name may not
+	 * match the symbol referenced in the original code.
+	 */
 	relsym_secindex = get_secindex(elf, relsym);
 	for (sym = elf->symtab_start; sym < elf->symtab_stop; sym++) {
 		if (get_secindex(elf, sym) != relsym_secindex)
@@ -1607,49 +1611,12 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
 
 static int is_executable_section(struct elf_info* elf, unsigned int section_index)
 {
-	if (section_index > elf->num_sections)
+	if (section_index >= elf->num_sections)
 		fatal("section_index is outside elf->num_sections!\n");
 
 	return ((elf->sechdrs[section_index].sh_flags & SHF_EXECINSTR) == SHF_EXECINSTR);
 }
 
-/*
- * We rely on a gross hack in section_rel[a]() calling find_extable_entry_size()
- * to know the sizeof(struct exception_table_entry) for the target architecture.
- */
-static unsigned int extable_entry_size = 0;
-static void find_extable_entry_size(const char* const sec, const Elf_Rela* r)
-{
-	/*
-	 * If we're currently checking the second relocation within __ex_table,
-	 * that relocation offset tells us the offsetof(struct
-	 * exception_table_entry, fixup) which is equal to sizeof(struct
-	 * exception_table_entry) divided by two. We use that to our advantage
-	 * since there's no portable way to get that size as every architecture
-	 * seems to go with different sized types. Not pretty but better than
-	 * hard-coding the size for every architecture..
-	 */
-	if (!extable_entry_size)
-		extable_entry_size = r->r_offset * 2;
-}
-
-static inline bool is_extable_fault_address(Elf_Rela *r)
-{
-	/*
-	 * extable_entry_size is only discovered after we've handled the
-	 * _second_ relocation in __ex_table, so only abort when we're not
-	 * handling the first reloc and extable_entry_size is zero.
-	 */
-	if (r->r_offset && extable_entry_size == 0)
-		fatal("extable_entry size hasn't been discovered!\n");
-
-	return ((r->r_offset == 0) ||
-		(r->r_offset % extable_entry_size == 0));
-}
-
-#define is_second_extable_reloc(Start, Cur, Sec)			\
-	(((Cur) == (Start) + 1) && (strcmp("__ex_table", (Sec)) == 0))
-
 static void report_extable_warnings(const char* modname, struct elf_info* elf,
 				    const struct sectioncheck* const mismatch,
 				    Elf_Rela* r, Elf_Sym* sym,
@@ -1706,22 +1673,9 @@ static void extable_mismatch_handler(const char* modname, struct elf_info *elf,
 		    "You might get more information about where this is\n"
 		    "coming from by using scripts/check_extable.sh %s\n",
 		    fromsec, (long)r->r_offset, tosec, modname);
-	else if (!is_executable_section(elf, get_secindex(elf, sym))) {
-		if (is_extable_fault_address(r))
-			fatal("The relocation at %s+0x%lx references\n"
-			      "section \"%s\" which is not executable, IOW\n"
-			      "it is not possible for the kernel to fault\n"
-			      "at that address. Something is seriously wrong\n"
-			      "and should be fixed.\n",
-			      fromsec, (long)r->r_offset, tosec);
-		else
-			fatal("The relocation at %s+0x%lx references\n"
-			      "section \"%s\" which is not executable, IOW\n"
-			      "the kernel will fault if it ever tries to\n"
-			      "jump to it. Something is seriously wrong\n"
-			      "and should be fixed.\n",
-			      fromsec, (long)r->r_offset, tosec);
-	}
+	else if (!is_executable_section(elf, get_secindex(elf, sym)))
+		error("%s+0x%lx references non-executable section '%s'\n",
+		      fromsec, (long)r->r_offset, tosec);
 }
 
 static void check_section_mismatch(const char *modname, struct elf_info *elf,
@@ -1782,19 +1736,33 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 #define R_ARM_THM_JUMP19	51
 #endif
 
+static int32_t sign_extend32(int32_t value, int index)
+{
+	uint8_t shift = 31 - index;
+
+	return (int32_t)(value << shift) >> shift;
+}
+
 static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 {
 	unsigned int r_typ = ELF_R_TYPE(r->r_info);
+	Elf_Sym *sym = elf->symtab_start + ELF_R_SYM(r->r_info);
+	void *loc = reloc_location(elf, sechdr, r);
+	uint32_t inst;
+	int32_t offset;
 
 	switch (r_typ) {
 	case R_ARM_ABS32:
-		/* From ARM ABI: (S + A) | T */
-		r->r_addend = (int)(long)
-			      (elf->symtab_start + ELF_R_SYM(r->r_info));
+		inst = TO_NATIVE(*(uint32_t *)loc);
+		r->r_addend = inst + sym->st_value;
 		break;
+	case R_ARM_PC24:
+	case R_ARM_CALL:
+	case R_ARM_JUMP24:
+		inst = TO_NATIVE(*(uint32_t *)loc);
+		offset = sign_extend32((inst & 0x00ffffff) << 2, 25);
+		r->r_addend = offset + sym->st_value + 8;
+		break;
 	case R_ARM_THM_CALL:
 	case R_ARM_THM_JUMP24:
 	case R_ARM_THM_JUMP19:
@@ -1872,8 +1840,6 @@ static void section_rela(const char *modname, struct elf_info *elf,
 		/* Skip special sections */
 		if (is_shndx_special(sym->st_shndx))
 			continue;
-		if (is_second_extable_reloc(start, rela, fromsec))
-			find_extable_entry_size(fromsec, &r);
 		check_section_mismatch(modname, elf, &r, sym, fromsec);
 	}
 }
@@ -1932,8 +1898,6 @@ static void section_rel(const char *modname, struct elf_info *elf,
 		/* Skip special sections */
 		if (is_shndx_special(sym->st_shndx))
 			continue;
-		if (is_second_extable_reloc(start, rel, fromsec))
-			find_extable_entry_size(fromsec, &r);
 		check_section_mismatch(modname, elf, &r, sym, fromsec);
 	}
 }
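A worked example for the `sign_extend32()` helper the modpost hunk introduces: an ARM branch stores a 24-bit signed word offset, so after shifting it left by two the sign bit sits at bit 25, and extending from there recovers the byte offset:

```c
#include <assert.h>
#include <stdint.h>

static int32_t sign_extend32(int32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* imm24 = 0xfffffe (-2 words): byte offset is -8 */
	assert(sign_extend32((0x00fffffe & 0x00ffffff) << 2, 25) == -8);
	/* imm24 = 1: byte offset is +4 */
	assert(sign_extend32((0x00000001 & 0x00ffffff) << 2, 25) == 4);
	return 0;
}
```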